From 70d7b4e89ab34c814613ebdc3de6ce26a7d17a3e Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Wed, 5 Jul 2017 13:39:04 +0100 Subject: [PATCH 01/12] Add index to HStar2Nodes. Tests pass --- merkle/hstar2.go | 115 ++++++++++++++++-------------- merkle/hstar2_test.go | 30 ++++---- merkle/sparse_merkle_tree.go | 18 +++-- merkle/sparse_merkle_tree_test.go | 4 +- storage/types.go | 5 ++ 5 files changed, 94 insertions(+), 78 deletions(-) diff --git a/merkle/hstar2.go b/merkle/hstar2.go index 1cbfee32ed..a241ff03ff 100644 --- a/merkle/hstar2.go +++ b/merkle/hstar2.go @@ -17,6 +17,7 @@ package merkle import ( "errors" "fmt" + "log" "math/big" "sort" @@ -54,15 +55,11 @@ func NewHStar2(treeID int64, hasher hashers.MapHasher) HStar2 { } } -// HStar2Root calculates the root of a sparse Merkle tree of depth n which contains -// the given set of non-null leaves. -func (s *HStar2) HStar2Root(n int, values []HStar2LeafHash) ([]byte, error) { +// HStar2Root calculates the root of a sparse Merkle tree of a given depth +// which contains the given set of non-null leaves. +func (s *HStar2) HStar2Root(depth int, values []HStar2LeafHash) ([]byte, error) { sort.Sort(ByIndex{values}) - return s.hStar2b(n, values, smtZero, - func(depth int, index *big.Int) ([]byte, error) { - return s.hasher.HashEmpty(s.treeID, PaddedBytes(index, s.hasher.Size()), depth), nil - }, - func(int, *big.Int, []byte) error { return nil }) + return s.hStar2b(0, depth, values, smtZero, nil, nil) } // SparseGetNodeFunc should return any pre-existing node hash for the node address. @@ -71,83 +68,93 @@ type SparseGetNodeFunc func(depth int, index *big.Int) ([]byte, error) // SparseSetNodeFunc should store the passed node hash, associating it with the address. type SparseSetNodeFunc func(depth int, index *big.Int, hash []byte) error -// HStar2Nodes calculates the root hash of a pre-existing sparse Merkle tree -// plus the extra values passed in. Get and set are used to fetch and store -// internal node values. 
Values must not contain multiple leaves for the same -// index. +// HStar2Nodes calculates the root hash of a pre-existing sparse Merkle tree (SMT). +// HStar2Nodes can also calculate the root nodes of subtrees inside a SMT. +// Get and set are used to fetch and store internal node values. +// Values must not contain multiple leaves for the same index. // -// The treeLevelOffset argument is used when the tree to be calculated is part -// of a larger tree. It identifes the level in the larger tree at which the -// root of the subtree being calculated is found. -// e.g. Imagine a tree 256 levels deep, and that you already (somehow) happen -// to have the intermediate hash values for the non-null nodes 8 levels below -// the root already calculated (i.e. you just need to calculate the top 8 -// levels of a 256-level tree). To do this, you'd set treeDepth=8, and -// treeLevelOffset=248 (256-8). -func (s *HStar2) HStar2Nodes(treeDepth, treeLevelOffset int, values []HStar2LeafHash, get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { +// prefix is the location of this subtree within the larger tree. Root is at nil. +// subtreeDepth is the number of levels in this subtree. 
+// The height of the whole tree is assumed to be hasher.BitLen() +func (s *HStar2) HStar2Nodes(prefix []byte, subtreeDepth int, values []HStar2LeafHash, + get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { if glog.V(3) { - glog.Infof("HStar2Nodes(%v, %v, %v)", treeDepth, treeLevelOffset, len(values)) + glog.Infof("HStar2Nodes(%x, %v, %v, %v)", prefix, depth, subtreeDepth, len(values)) for _, v := range values { glog.Infof(" %x: %x", v.Index.Bytes(), v.LeafHash) } } - if treeLevelOffset < 0 { + depth := len(prefix) * 8 + totalDepth := depth + subtreeDepth + if totalDepth > s.hasher.BitLen() { return nil, ErrNegativeTreeLevelOffset } sort.Sort(ByIndex{values}) - return s.hStar2b(treeDepth, values, smtZero, - func(depth int, index *big.Int) ([]byte, error) { - // if we've got a function for getting existing node values, try it: - h, err := get(treeDepth-depth, index) - if err != nil { - return nil, err - } - // if we got a value then we'll use that - if h != nil { - return h, nil - } - // otherwise just return the null hash for this level - return s.hasher.HashEmpty(s.treeID, PaddedBytes(index, s.hasher.Size()), depth+treeLevelOffset), nil - }, - func(depth int, index *big.Int, hash []byte) error { - return set(treeDepth-depth, index, hash) - }) + offset := new(big.Int).SetBytes(prefix) + offset = offset.Lsh(offset, uint(s.hasher.BitLen()-depth)) // shift prefix into place. + return s.hStar2b(depth, totalDepth, values, offset, get, set) } -// hStar2b is the recursive implementation for calculating a sparse Merkle tree -// root value. -func (s *HStar2) hStar2b(n int, values []HStar2LeafHash, offset *big.Int, get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { - if n == 0 { +// hStar2b computes a sparse Merkle tree root value recursively. 
+func (s *HStar2) hStar2b(depth, maxDepth int, values []HStar2LeafHash, offset *big.Int, + get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { + log.Printf("hStar2b(%3v, %3v, %2d values, %x)", depth, maxDepth, len(values), offset.Bytes()) + if depth == maxDepth { switch { case len(values) == 0: - return get(n, offset) - case len(values) != 1: + return s.get(offset, depth, get) + case len(values) == 1: + return values[0].LeafHash, nil + default: return nil, fmt.Errorf("hStar2b base case: len(values): %d, want 1", len(values)) } - return values[0].LeafHash, nil } if len(values) == 0 { - return get(n, offset) + return s.get(offset, depth, get) } - split := new(big.Int).Lsh(smtOne, uint(n-1)) + bitsLeft := maxDepth - depth + split := new(big.Int).Lsh(smtOne, uint(bitsLeft-1)) split.Add(split, offset) i := sort.Search(len(values), func(i int) bool { return values[i].Index.Cmp(split) >= 0 }) - lhs, err := s.hStar2b(n-1, values[:i], offset, get, set) + lhs, err := s.hStar2b(depth+1, maxDepth, values[:i], offset, get, set) if err != nil { return nil, err } - rhs, err := s.hStar2b(n-1, values[i:], split, get, set) + rhs, err := s.hStar2b(depth+1, maxDepth, values[i:], split, get, set) if err != nil { return nil, err } h := s.hasher.HashChildren(lhs, rhs) - if set != nil { - set(n, offset, h) - } + s.set(offset, depth, h, set) return h, nil } +// get attempts to use getter. If getter fails, returns the HashEmpty value. +func (s *HStar2) get(index *big.Int, depth int, getter SparseGetNodeFunc) ([]byte, error) { + // if we've got a function for getting existing node values, try it: + if getter != nil { + h, err := getter(depth, index) + if err != nil { + return nil, err + } + // if we got a value then we'll use that + if h != nil { + return h, nil + } + } + height := s.hasher.BitLen() - depth + return s.hasher.HashEmpty(s.treeID, PaddedBytes(index, s.hasher.Size()), height), nil +} + +// set attempts to use setter if it not nil. 
+func (s *HStar2) set(index *big.Int, depth int, hash []byte, setter SparseSetNodeFunc) error { + if setter != nil { + return setter(depth, index, hash) + } + return nil +} + // HStar2LeafHash sorting boilerplate below. // Leaves is a slice of HStar2LeafHash diff --git a/merkle/hstar2_test.go b/merkle/hstar2_test.go index f306668947..95de58cff5 100644 --- a/merkle/hstar2_test.go +++ b/merkle/hstar2_test.go @@ -87,7 +87,7 @@ func TestHStar2SimpleDataSetKAT(t *testing.T) { continue } if got, want := root, x.root; !bytes.Equal(got, want) { - t.Errorf("Root: \n%x, want:\n%x", got, want) + t.Errorf("Root: %x, want: %x", got, want) } } } @@ -107,7 +107,7 @@ func TestHStar2GetSet(t *testing.T) { if len(values) != 1 { t.Fatalf("Should only have 1 leaf per run, got %d", len(values)) } - root, err := s.HStar2Nodes(s.hasher.BitLen(), 0, values, + root, err := s.HStar2Nodes(nil, s.hasher.BitLen(), values, func(depth int, index *big.Int) ([]byte, error) { return cache[fmt.Sprintf("%x/%d", index, depth)], nil }, @@ -120,7 +120,7 @@ func TestHStar2GetSet(t *testing.T) { continue } if got, want := root, x.root; !bytes.Equal(got, want) { - t.Errorf("Root:\n%x, want:\n%x", got, want) + t.Errorf("Root: %x, want: %x", got, want) } } } @@ -132,18 +132,18 @@ func rootsForTrimmedKeys(t *testing.T, prefixSize int, lh []HStar2LeafHash) []HS var ret []HStar2LeafHash s := NewHStar2(treeID, maphasher.Default) for i := range lh { - prefix := new(big.Int).Rsh(lh[i].Index, uint(s.hasher.BitLen()-prefixSize)) - b := lh[i].Index.Bytes() - // ensure we've got any chopped of leading zero bytes - for len(b) < 32 { - b = append([]byte{0}, b...) + subtreeDepth := s.hasher.BitLen() - prefixSize + prefix := lh[i].Index.Bytes() + // ensure we've got any chopped off leading zero bytes + for len(prefix) < 32 { + prefix = append([]byte{0}, prefix...) 
} - lh[i].Index.SetBytes(b[prefixSize/8:]) - root, err := s.HStar2Root(s.hasher.BitLen()-prefixSize, []HStar2LeafHash{lh[i]}) + prefix = prefix[:prefixSize/8] // We only want the first prefixSize bytes. + root, err := s.HStar2Nodes(prefix, subtreeDepth, []HStar2LeafHash{lh[i]}, nil, nil) if err != nil { t.Fatalf("Failed to calculate root %v", err) } - ret = append(ret, HStar2LeafHash{prefix, root}) + ret = append(ret, HStar2LeafHash{new(big.Int).SetBytes(prefix), root}) } return ret } @@ -163,15 +163,13 @@ func TestHStar2OffsetRootKAT(t *testing.T) { leaves := createHStar2Leaves(treeID, maphasher.Default, iv...) intermediates := rootsForTrimmedKeys(t, size, leaves) - root, err := s.HStar2Nodes(size, s.hasher.BitLen()-size, intermediates, - func(int, *big.Int) ([]byte, error) { return nil, nil }, - func(int, *big.Int, []byte) error { return nil }) + root, err := s.HStar2Nodes(nil, size, intermediates, nil, nil) if err != nil { t.Errorf("Failed to calculate root at iteration %d: %v", i, err) continue } if got, want := root, x.root; !bytes.Equal(got, want) { - t.Errorf("Root: %x, want: %x", got, want) + t.Errorf("HStar2Nodes(i: %v, size:%v): %x, want: %x", i, size, got, want) } } } @@ -180,7 +178,7 @@ func TestHStar2OffsetRootKAT(t *testing.T) { func TestHStar2NegativeTreeLevelOffset(t *testing.T) { s := NewHStar2(treeID, maphasher.Default) - _, err := s.HStar2Nodes(32, -1, []HStar2LeafHash{}, + _, err := s.HStar2Nodes(make([]byte, 31), 9, []HStar2LeafHash{}, func(int, *big.Int) ([]byte, error) { return nil, nil }, func(int, *big.Int, []byte) error { return nil }) if got, want := err, ErrNegativeTreeLevelOffset; got != want { diff --git a/merkle/sparse_merkle_tree.go b/merkle/sparse_merkle_tree.go index a38903b04d..61da906998 100644 --- a/merkle/sparse_merkle_tree.go +++ b/merkle/sparse_merkle_tree.go @@ -56,6 +56,7 @@ type indexAndHash struct { // rootHashOrError represents a (sub-)tree root hash, or an error which // prevented the calculation from completing. 
+// TODO(gdbelvin): represent an empty subtree with a nil hash? type rootHashOrError struct { hash []byte err error @@ -170,7 +171,9 @@ func (s *subtreeWriter) SetLeaf(ctx context.Context, index []byte, hash []byte) return subtree.SetLeaf(ctx, index[s.subtreeDepth/8:], hash) case indexLen == s.subtreeDepth: - s.leafQueue <- func() (*indexAndHash, error) { return &indexAndHash{index: index, hash: hash}, nil } + s.leafQueue <- func() (*indexAndHash, error) { + return &indexAndHash{index: index, hash: hash}, nil + } return nil } @@ -209,21 +212,24 @@ func (s *subtreeWriter) buildSubtree(ctx context.Context) { s.root <- rootHashOrError{hash: nil, err: err} return } - leaves = append(leaves, HStar2LeafHash{Index: new(big.Int).SetBytes(ih.index), LeafHash: ih.hash}) + leaves = append(leaves, HStar2LeafHash{ + Index: new(big.Int).SetBytes(ih.index), + LeafHash: ih.hash, + }) nodesToStore = append(nodesToStore, storage.Node{ - NodeID: storage.NewNodeIDFromHash(bytes.Join([][]byte{s.prefix, ih.index}, []byte{})), + NodeID: storage.NewNodeIDFromHash( + bytes.Join([][]byte{s.prefix, ih.index}, []byte{})), Hash: ih.hash, NodeRevision: s.treeRevision, }) - } // calculate new root, and intermediate nodes: hs2 := NewHStar2(s.treeID, s.treeHasher) - treeDepthOffset := (s.treeHasher.Size()-len(s.prefix))*8 - s.subtreeDepth totalDepth := len(s.prefix)*8 + s.subtreeDepth - root, err := hs2.HStar2Nodes(s.subtreeDepth, treeDepthOffset, leaves, + prefixDepth := len(s.prefix) * 8 + root, err := hs2.HStar2Nodes(nil, prefixDepth, s.subtreeDepth, leaves, func(height int, index *big.Int) ([]byte, error) { nodeID := storage.NewNodeIDFromRelativeBigInt(s.prefix, s.subtreeDepth, height, index, totalDepth) glog.V(4).Infof("buildSubtree.get(%x, %d) nid: %x, %v", diff --git a/merkle/sparse_merkle_tree_test.go b/merkle/sparse_merkle_tree_test.go index b0202a1cc7..2262907e6e 100644 --- a/merkle/sparse_merkle_tree_test.go +++ b/merkle/sparse_merkle_tree_test.go @@ -369,8 +369,8 @@ func 
testSparseTreeCalculatedRootWithWriter(ctx context.Context, t *testing.T, r if err != nil { t.Fatalf("Failed to commit map changes: %v", err) } - if expected, got := vec.expectedRoot, root; !bytes.Equal(expected, got) { - t.Errorf("Expected root:\n%s, but got root:\n%s", base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(got)) + if got, want := root, vec.expectedRoot; !bytes.Equal(got, want) { + t.Errorf("got root: %x, want %x", got, want) } } diff --git a/storage/types.go b/storage/types.go index ee08149dd5..e532b73d36 100644 --- a/storage/types.go +++ b/storage/types.go @@ -369,6 +369,11 @@ func (n *NodeID) Equivalent(other NodeID) bool { return n.String() == other.String() } +// Equal returns true iff a and b have the same string representation. +func Equal(a, b *NodeID) bool { + return a.String() == b.String() +} + // PopulateSubtreeFunc is a function which knows how to re-populate a subtree // from just its leaf nodes. type PopulateSubtreeFunc func(*storagepb.SubtreeProto) error From 60470f0059db97b8216f5f5e4bf1784378698ec6 Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Wed, 5 Jul 2017 19:52:44 +0100 Subject: [PATCH 02/12] Use absolute indexes --- integration/maptest/map_test.go | 7 ++++ merkle/hstar2.go | 18 ++++---- merkle/hstar2_test.go | 11 ++++- merkle/sparse_merkle_tree.go | 67 ++++++++++++++++-------------- merkle/sparse_merkle_tree_test.go | 12 ++++-- storage/cache/map_subtree_cache.go | 3 +- storage/types.go | 5 --- 7 files changed, 69 insertions(+), 54 deletions(-) diff --git a/integration/maptest/map_test.go b/integration/maptest/map_test.go index 5eee5c2c97..a05bd07496 100644 --- a/integration/maptest/map_test.go +++ b/integration/maptest/map_test.go @@ -231,6 +231,13 @@ func TestInclusion(t *testing.T) { {Index: h2b("0000000000000000000000000000000000000000000000000000000000000002"), LeafValue: []byte("C")}, }, }, + { + desc: "CONIKS", + HashStrategy: trillian.HashStrategy_CONIKS_SHA512_256, + leaves: 
[]*trillian.MapLeaf{ + {Index: h2b("4100000000000000000000000000000000000000000000000000000000000000"), LeafValue: []byte("A")}, + }, + }, } { tree, hasher, err := newTreeWithHasher(ctx, env, tc.HashStrategy) if err != nil { diff --git a/merkle/hstar2.go b/merkle/hstar2.go index a241ff03ff..3d882611e9 100644 --- a/merkle/hstar2.go +++ b/merkle/hstar2.go @@ -17,7 +17,6 @@ package merkle import ( "errors" "fmt" - "log" "math/big" "sort" @@ -68,18 +67,17 @@ type SparseGetNodeFunc func(depth int, index *big.Int) ([]byte, error) // SparseSetNodeFunc should store the passed node hash, associating it with the address. type SparseSetNodeFunc func(depth int, index *big.Int, hash []byte) error -// HStar2Nodes calculates the root hash of a pre-existing sparse Merkle tree (SMT). -// HStar2Nodes can also calculate the root nodes of subtrees inside a SMT. -// Get and set are used to fetch and store internal node values. -// Values must not contain multiple leaves for the same index. +// HStar2Nodes calculates the root hash of a pre-existing sparse Merkle tree +// plus the extra values passed in. Get and set are used to fetch and store +// internal node values. Values must not contain multiple leaves for the same +// index. // // prefix is the location of this subtree within the larger tree. Root is at nil. // subtreeDepth is the number of levels in this subtree. 
-// The height of the whole tree is assumed to be hasher.BitLen() func (s *HStar2) HStar2Nodes(prefix []byte, subtreeDepth int, values []HStar2LeafHash, get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { if glog.V(3) { - glog.Infof("HStar2Nodes(%x, %v, %v, %v)", prefix, depth, subtreeDepth, len(values)) + glog.Infof("HStar2Nodes(%x, %v, %v)", prefix, subtreeDepth, len(values)) for _, v := range values { glog.Infof(" %x: %x", v.Index.Bytes(), v.LeafHash) } @@ -98,7 +96,6 @@ func (s *HStar2) HStar2Nodes(prefix []byte, subtreeDepth int, values []HStar2Lea // hStar2b computes a sparse Merkle tree root value recursively. func (s *HStar2) hStar2b(depth, maxDepth int, values []HStar2LeafHash, offset *big.Int, get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { - log.Printf("hStar2b(%3v, %3v, %2d values, %x)", depth, maxDepth, len(values), offset.Bytes()) if depth == maxDepth { switch { case len(values) == 0: @@ -113,7 +110,7 @@ func (s *HStar2) hStar2b(depth, maxDepth int, values []HStar2LeafHash, offset *b return s.get(offset, depth, get) } - bitsLeft := maxDepth - depth + bitsLeft := s.hasher.BitLen() - depth split := new(big.Int).Lsh(smtOne, uint(bitsLeft-1)) split.Add(split, offset) i := sort.Search(len(values), func(i int) bool { return values[i].Index.Cmp(split) >= 0 }) @@ -144,7 +141,8 @@ func (s *HStar2) get(index *big.Int, depth int, getter SparseGetNodeFunc) ([]byt } } height := s.hasher.BitLen() - depth - return s.hasher.HashEmpty(s.treeID, PaddedBytes(index, s.hasher.Size()), height), nil + indexBytes := PaddedBytes(index, s.hasher.Size()) + return s.hasher.HashEmpty(s.treeID, indexBytes, height), nil } // set attempts to use setter if it not nil. diff --git a/merkle/hstar2_test.go b/merkle/hstar2_test.go index 95de58cff5..d299f6f121 100644 --- a/merkle/hstar2_test.go +++ b/merkle/hstar2_test.go @@ -130,7 +130,8 @@ func TestHStar2GetSet(t *testing.T) { // 256-prefixSize, and can be passed in as leaves to top-subtree calculation. 
func rootsForTrimmedKeys(t *testing.T, prefixSize int, lh []HStar2LeafHash) []HStar2LeafHash { var ret []HStar2LeafHash - s := NewHStar2(treeID, maphasher.Default) + hasher := maphasher.Default + s := NewHStar2(treeID, hasher) for i := range lh { subtreeDepth := s.hasher.BitLen() - prefixSize prefix := lh[i].Index.Bytes() @@ -143,7 +144,13 @@ func rootsForTrimmedKeys(t *testing.T, prefixSize int, lh []HStar2LeafHash) []HS if err != nil { t.Fatalf("Failed to calculate root %v", err) } - ret = append(ret, HStar2LeafHash{new(big.Int).SetBytes(prefix), root}) + + index := new(big.Int).SetBytes(prefix) + index = index.Lsh(index, uint(hasher.BitLen()-prefixSize)) + ret = append(ret, HStar2LeafHash{ + Index: index, + LeafHash: root, + }) } return ret } diff --git a/merkle/sparse_merkle_tree.go b/merkle/sparse_merkle_tree.go index 61da906998..2e8383928d 100644 --- a/merkle/sparse_merkle_tree.go +++ b/merkle/sparse_merkle_tree.go @@ -19,6 +19,7 @@ import ( "context" "errors" "fmt" + "log" "math/big" "sync" @@ -110,7 +111,7 @@ type subtreeWriter struct { tx storage.TreeTX treeRevision int64 - treeHasher hashers.MapHasher + hasher hashers.MapHasher getSubtree getSubtreeFunc } @@ -152,32 +153,31 @@ func (s *subtreeWriter) getOrCreateChildSubtree(ctx context.Context, childPrefix return subtree, nil } -// SetLeaf sets a single leaf hash for incorporation into the sparse Merkle -// tree. +// SetLeaf sets a single leaf hash for incorporation into the sparse Merkle tree. +// index is the full path of the leaf, starting from the root (not the subtree's root). 
func (s *subtreeWriter) SetLeaf(ctx context.Context, index []byte, hash []byte) error { - indexLen := len(index) * 8 + depth := len(index) * 8 + absSubtreeDepth := len(s.prefix)*8 + s.subtreeDepth switch { - case indexLen < s.subtreeDepth: - return fmt.Errorf("index length %d is < our depth %d", indexLen, s.subtreeDepth) + case depth < absSubtreeDepth: + return fmt.Errorf("depth: %d, want >= %d", depth, absSubtreeDepth) - case indexLen > s.subtreeDepth: - childPrefix := index[:s.subtreeDepth/8] + case depth > absSubtreeDepth: + childPrefix := index[:absSubtreeDepth/8] subtree, err := s.getOrCreateChildSubtree(ctx, childPrefix) if err != nil { return err } - return subtree.SetLeaf(ctx, index[s.subtreeDepth/8:], hash) + return subtree.SetLeaf(ctx, index, hash) - case indexLen == s.subtreeDepth: + default: // depth == absSubtreeDepth: s.leafQueue <- func() (*indexAndHash, error) { return &indexAndHash{index: index, hash: hash}, nil } return nil } - - return fmt.Errorf("internal logic error in SetLeaf. index length: %d, subtreeDepth: %d", indexLen, s.subtreeDepth) } // CalculateRoot initiates the process of calculating the subtree root. 
@@ -212,28 +212,27 @@ func (s *subtreeWriter) buildSubtree(ctx context.Context) { s.root <- rootHashOrError{hash: nil, err: err} return } + index := new(big.Int).SetBytes(ih.index) + index = index.Lsh(index, uint(s.hasher.BitLen()-len(ih.index)*8)) leaves = append(leaves, HStar2LeafHash{ - Index: new(big.Int).SetBytes(ih.index), + Index: index, LeafHash: ih.hash, }) nodesToStore = append(nodesToStore, storage.Node{ - NodeID: storage.NewNodeIDFromHash( - bytes.Join([][]byte{s.prefix, ih.index}, []byte{})), + NodeID: storage.NewNodeIDFromHash(ih.index), Hash: ih.hash, NodeRevision: s.treeRevision, }) } // calculate new root, and intermediate nodes: - hs2 := NewHStar2(s.treeID, s.treeHasher) - totalDepth := len(s.prefix)*8 + s.subtreeDepth - prefixDepth := len(s.prefix) * 8 - root, err := hs2.HStar2Nodes(nil, prefixDepth, s.subtreeDepth, leaves, - func(height int, index *big.Int) ([]byte, error) { - nodeID := storage.NewNodeIDFromRelativeBigInt(s.prefix, s.subtreeDepth, height, index, totalDepth) + hs2 := NewHStar2(s.treeID, s.hasher) + root, err := hs2.HStar2Nodes(s.prefix, s.subtreeDepth, leaves, + func(depth int, index *big.Int) ([]byte, error) { + nodeID := storage.NewNodeIDFromBigInt(depth, index, s.hasher.BitLen()) glog.V(4).Infof("buildSubtree.get(%x, %d) nid: %x, %v", - index.Bytes(), height, nodeID.Path, nodeID.PrefixLenBits) + index.Bytes(), depth, nodeID.Path, nodeID.PrefixLenBits) nodes, err := s.tx.GetMerkleNodes(ctx, s.treeRevision, []storage.NodeID{nodeID}) if err != nil { return nil, err @@ -241,23 +240,23 @@ func (s *subtreeWriter) buildSubtree(ctx context.Context) { if len(nodes) == 0 { return nil, nil } - if expected, got := nodeID, nodes[0].NodeID; !expected.Equivalent(got) { - return nil, fmt.Errorf("expected node ID %s from storage, but got %s", expected.String(), got.String()) + if got, want := nodes[0].NodeID, nodeID; !got.Equivalent(want) { + return nil, fmt.Errorf("got node %s from storage, want %s", got, want) } - if expected, got := 
s.treeRevision, nodes[0].NodeRevision; got > expected { - return nil, fmt.Errorf("expected node revision <= %d, but got %d", expected, got) + if got, want := nodes[0].NodeRevision, s.treeRevision; got > want { + return nil, fmt.Errorf("got node revision %d, want <= %d", got, want) } return nodes[0].Hash, nil }, - func(height int, index *big.Int, h []byte) error { + func(depth int, index *big.Int, h []byte) error { // Don't store the root node of the subtree - that's part of the parent // tree. - if height == 0 && len(s.prefix) > 0 { + if depth == len(s.prefix)*8 && len(s.prefix) > 0 { return nil } - nodeID := storage.NewNodeIDFromRelativeBigInt(s.prefix, s.subtreeDepth, height, index, totalDepth) + nodeID := storage.NewNodeIDFromBigInt(depth, index, s.hasher.BitLen()) glog.V(4).Infof("buildSubtree.set(%x, %v) nid: %x, %v : %x", - index.Bytes(), height, nodeID.Path, nodeID.PrefixLenBits, h) + index.Bytes(), depth, nodeID.Path, nodeID.PrefixLenBits, h) nodesToStore = append(nodesToStore, storage.Node{ NodeID: nodeID, @@ -327,7 +326,7 @@ func newLocalSubtreeWriter(ctx context.Context, treeID, rev int64, prefix []byte root: make(chan rootHashOrError, 1), children: make(map[string]Subtree), tx: tx, - treeHasher: h, + hasher: h, getSubtree: func(ctx context.Context, p []byte) (Subtree, error) { myPrefix := bytes.Join([][]byte{prefix, p}, []byte{}) return newLocalSubtreeWriter(ctx, treeID, rev, myPrefix, depths[1:], newTX, h) @@ -388,14 +387,20 @@ func (s SparseMerkleTreeReader) RootAtRevision(ctx context.Context, rev int64) ( func (s SparseMerkleTreeReader) InclusionProof(ctx context.Context, rev int64, index []byte) ([][]byte, error) { nid := storage.NewNodeIDFromHash(index) sibs := nid.Siblings() + log.Printf("Siblings: ") + for _, s := range sibs { + log.Printf(" %x", s.Path) + } nodes, err := s.tx.GetMerkleNodes(ctx, rev, sibs) if err != nil { return nil, err } nodeMap := make(map[string]*storage.Node) + log.Printf("Got Nodes: ") for _, n := range nodes { n := n // 
need this or we'll end up with the same node hash repeated in the map + log.Printf(" %x, %d: %x", n.NodeID.Path, len(n.NodeID.String()), n.Hash) nodeMap[n.NodeID.String()] = &n } diff --git a/merkle/sparse_merkle_tree_test.go b/merkle/sparse_merkle_tree_test.go index 2262907e6e..14286431a4 100644 --- a/merkle/sparse_merkle_tree_test.go +++ b/merkle/sparse_merkle_tree_test.go @@ -437,10 +437,14 @@ func testSparseTreeFetches(ctx context.Context, t *testing.T, vec sparseTestVect id := sibs[j].String() pathNode := nodeID.String()[:len(id)] if _, ok := reads[pathNode]; ok { - // we're modifying both children of a node because two keys are - // intersecting, since both will be recalculated neither will be read - // from storage so we remove the previously set expectation for this - // node's sibling, and skip adding one for this node: + // we're modifying both children of a + // node because two keys are + // intersecting, since both will be + // recalculated neither will be read + // from storage so we remove the + // previously set expectation for this + // node's sibling, and skip adding one + // for this node: delete(reads, pathNode) continue } diff --git a/storage/cache/map_subtree_cache.go b/storage/cache/map_subtree_cache.go index ee17dce6c9..2554b30c73 100644 --- a/storage/cache/map_subtree_cache.go +++ b/storage/cache/map_subtree_cache.go @@ -55,8 +55,7 @@ func populateMapSubtreeNodes(treeID int64, hasher hashers.MapHasher) storage.Pop }) } hs2 := merkle.NewHStar2(treeID, hasher) - offset := hasher.BitLen() - rootID.PrefixLenBits - int(st.Depth) - root, err := hs2.HStar2Nodes(int(st.Depth), offset, leaves, + root, err := hs2.HStar2Nodes(st.Prefix, rootID.PrefixLenBits, int(st.Depth), leaves, func(depth int, index *big.Int) ([]byte, error) { return nil, nil }, diff --git a/storage/types.go b/storage/types.go index e532b73d36..ee08149dd5 100644 --- a/storage/types.go +++ b/storage/types.go @@ -369,11 +369,6 @@ func (n *NodeID) Equivalent(other NodeID) bool { 
return n.String() == other.String() } -// Equal returns true iff a and b have the same string representation. -func Equal(a, b *NodeID) bool { - return a.String() == b.String() -} - // PopulateSubtreeFunc is a function which knows how to re-populate a subtree // from just its leaf nodes. type PopulateSubtreeFunc func(*storagepb.SubtreeProto) error From 450e2d29103ef1b5f564ce4cf2f3a8f7c9605d3f Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Tue, 25 Jul 2017 15:37:11 +0100 Subject: [PATCH 03/12] Add MaskIndex --- merkle/hstar2.go | 1 + merkle/hstar2_test.go | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/merkle/hstar2.go b/merkle/hstar2.go index 3d882611e9..728445e910 100644 --- a/merkle/hstar2.go +++ b/merkle/hstar2.go @@ -142,6 +142,7 @@ func (s *HStar2) get(index *big.Int, depth int, getter SparseGetNodeFunc) ([]byt } height := s.hasher.BitLen() - depth indexBytes := PaddedBytes(index, s.hasher.Size()) + indexBytes = MaskIndex(indexBytes, depth) return s.hasher.HashEmpty(s.treeID, indexBytes, height), nil } diff --git a/merkle/hstar2_test.go b/merkle/hstar2_test.go index d299f6f121..c92e6c2ffc 100644 --- a/merkle/hstar2_test.go +++ b/merkle/hstar2_test.go @@ -178,16 +178,16 @@ func TestHStar2OffsetRootKAT(t *testing.T) { if got, want := root, x.root; !bytes.Equal(got, want) { t.Errorf("HStar2Nodes(i: %v, size:%v): %x, want: %x", i, size, got, want) } + break } + break } } func TestHStar2NegativeTreeLevelOffset(t *testing.T) { s := NewHStar2(treeID, maphasher.Default) - _, err := s.HStar2Nodes(make([]byte, 31), 9, []HStar2LeafHash{}, - func(int, *big.Int) ([]byte, error) { return nil, nil }, - func(int, *big.Int, []byte) error { return nil }) + _, err := s.HStar2Nodes(make([]byte, 31), 9, []HStar2LeafHash{}, nil, nil) if got, want := err, ErrNegativeTreeLevelOffset; got != want { t.Fatalf("Hstar2Nodes(): %v, want %v", got, want) } From 906591d11c7ec14b3e541f4689584bad77b50826 Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Thu, 27 Jul 
2017 12:30:45 +0100 Subject: [PATCH 04/12] ParseSuffix --- storage/cache/map_subtree_cache.go | 15 +++++++-------- storage/suffix.go | 13 +++++++++++++ storage/suffix_test.go | 27 +++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 8 deletions(-) diff --git a/storage/cache/map_subtree_cache.go b/storage/cache/map_subtree_cache.go index 2554b30c73..14e682def8 100644 --- a/storage/cache/map_subtree_cache.go +++ b/storage/cache/map_subtree_cache.go @@ -39,32 +39,31 @@ func NewMapSubtreeCache(mapStrata []int, treeID int64, hasher hashers.MapHasher) func populateMapSubtreeNodes(treeID int64, hasher hashers.MapHasher) storage.PopulateSubtreeFunc { return func(st *storagepb.SubtreeProto) error { st.InternalNodes = make(map[string][]byte) - rootID := storage.NewNodeIDFromHash(st.Prefix) leaves := make([]merkle.HStar2LeafHash, 0, len(st.Leaves)) for k64, v := range st.Leaves { k, err := base64.StdEncoding.DecodeString(k64) if err != nil { return err } - if k[0]%depthQuantum != 0 { + // TODO(gdbelvin): test against subtree depth. 
+ if sfx.Bits%depthQuantum != 0 { return fmt.Errorf("unexpected non-leaf suffix found: %x", k) } + index := new(big.Int).SetBytes(sfx.Path) + index = index.Lsh(index, uint(hasher.BitLen()-len(index.Bytes()))) leaves = append(leaves, merkle.HStar2LeafHash{ + Index: index, LeafHash: v, - Index: new(big.Int).SetBytes(k[1:]), }) } hs2 := merkle.NewHStar2(treeID, hasher) - root, err := hs2.HStar2Nodes(st.Prefix, rootID.PrefixLenBits, int(st.Depth), leaves, - func(depth int, index *big.Int) ([]byte, error) { - return nil, nil - }, + root, err := hs2.HStar2Nodes(st.Prefix, int(st.Depth), leaves, nil, func(depth int, index *big.Int, h []byte) error { if depth == 0 && len(st.Prefix) > 0 { // no space for the root in the node cache return nil } - nodeID := storage.NewNodeIDFromRelativeBigInt(st.Prefix, int(st.Depth), depth, index, hasher.BitLen()) + nodeID := storage.NewNodeIDFromBigInt(depth, index, hasher.BitLen()) _, sfx := nodeID.Split(len(st.Prefix), int(st.Depth)) sfxKey := sfx.String() if glog.V(4) { diff --git a/storage/suffix.go b/storage/suffix.go index 18cd60a1f7..ebf4a7d21b 100644 --- a/storage/suffix.go +++ b/storage/suffix.go @@ -37,3 +37,16 @@ func (s Suffix) String() string { r = append(r, s.Path...) return base64.StdEncoding.EncodeToString(r) } + +// ParseSuffix converts a suffix string back into a Suffix. 
+func ParseSuffix(s string) (Suffix, error) { + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return Suffix{}, err + } + + return Suffix{ + Bits: byte(b[0]), + Path: b[1:], + }, nil +} diff --git a/storage/suffix_test.go b/storage/suffix_test.go index 5477aa053f..d637488a4b 100644 --- a/storage/suffix_test.go +++ b/storage/suffix_test.go @@ -29,6 +29,33 @@ const ( // storage/cache when merkle no longer depends on storage.NodeID ) +func TestParseSuffix(t *testing.T) { + for _, tc := range []struct { + prefix []byte + leafIndex int64 + want []byte + }{ + {h2b(""), 1, h2b("0801")}, + {h2b("00"), 1, h2b("0801")}, + } { + nodeID := NewNodeIDFromPrefix(tc.prefix, logStrataDepth, tc.leafIndex, logStrataDepth, maxLogDepth) + _, sfx := nodeID.Split(len(tc.prefix), logStrataDepth) + sfxKey := sfx.String() + + sfxP, err := ParseSuffix(sfxKey) + if err != nil { + t.Errorf("ParseSuffix(%s): %v", sfxKey, err) + continue + } + if got, want := sfx.Bits, sfxP.Bits; got != want { + t.Errorf("ParseSuffix(%s).Bits: %v, want %v", sfxKey, got, want) + } + if got, want := sfx.Path, sfxP.Path; !bytes.Equal(got, want) { + t.Errorf("ParseSuffix(%s).Bits: %x, want %x", sfxKey, got, want) + } + } +} + // TestSuffixKeyEquals ensures that NodeID.Split produces the same output as makeSuffixKey for the Log's use cases. 
func TestSuffixKeyEquals(t *testing.T) { for _, tc := range []struct { From 09956237a30c5f2b70284ac057f9d292d4903c30 Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Thu, 27 Jul 2017 20:16:30 +0100 Subject: [PATCH 05/12] Tests pass - proper indexes to HStar2 --- integration/maptest/map_test.go | 18 ++++++++++++++---- storage/cache/map_subtree_cache.go | 7 ++++--- storage/cache/subtree_cache.go | 2 +- 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/integration/maptest/map_test.go b/integration/maptest/map_test.go index a05bd07496..fe83487f97 100644 --- a/integration/maptest/map_test.go +++ b/integration/maptest/map_test.go @@ -232,10 +232,19 @@ func TestInclusion(t *testing.T) { }, }, { - desc: "CONIKS", + desc: "CONIKS single", HashStrategy: trillian.HashStrategy_CONIKS_SHA512_256, leaves: []*trillian.MapLeaf{ - {Index: h2b("4100000000000000000000000000000000000000000000000000000000000000"), LeafValue: []byte("A")}, + {Index: h2b("0000000000000000000000000000000000000000000000000000000000000000"), LeafValue: []byte("A")}, + }, + }, + { + desc: "maphasher multi", + HashStrategy: trillian.HashStrategy_TEST_MAP_HASHER, + leaves: []*trillian.MapLeaf{ + {Index: h2b("0000000000000000000000000000000000000000000000000000000000000000"), LeafValue: []byte("A")}, + {Index: h2b("0000000000000000000000000000000000000000000000000000000000000001"), LeafValue: []byte("B")}, + {Index: h2b("0000000000000000000000000000000000000000000000000000000000000002"), LeafValue: []byte("C")}, }, }, } { @@ -290,10 +299,11 @@ func TestInclusionBatch(t *testing.T) { HashStrategy trillian.HashStrategy batchSize, numBatches int }{ + { desc: "maphasher batch", HashStrategy: trillian.HashStrategy_TEST_MAP_HASHER, - batchSize: 64, numBatches: 32, + batchSize: 1, numBatches: 1, }, // TODO(gdbelvin): investigate batches of size > 150. // We are currently getting DB connection starvation: Too many connections. 
@@ -304,7 +314,7 @@ func TestInclusionBatch(t *testing.T) { } if err := RunMapBatchTest(ctx, env, tree, tc.batchSize, tc.numBatches); err != nil { - t.Errorf("%v: %v", tc.desc, err) + t.Errorf("BatchSize: %v, Batches: %v: %v", tc.batchSize, tc.numBatches, err) } } } diff --git a/storage/cache/map_subtree_cache.go b/storage/cache/map_subtree_cache.go index 14e682def8..58b2853fde 100644 --- a/storage/cache/map_subtree_cache.go +++ b/storage/cache/map_subtree_cache.go @@ -41,13 +41,13 @@ func populateMapSubtreeNodes(treeID int64, hasher hashers.MapHasher) storage.Pop st.InternalNodes = make(map[string][]byte) leaves := make([]merkle.HStar2LeafHash, 0, len(st.Leaves)) for k64, v := range st.Leaves { - k, err := base64.StdEncoding.DecodeString(k64) + sfx, err := storage.ParseSuffix(k64) if err != nil { return err } // TODO(gdbelvin): test against subtree depth. if sfx.Bits%depthQuantum != 0 { - return fmt.Errorf("unexpected non-leaf suffix found: %x", k) + return fmt.Errorf("unexpected non-leaf suffix found: %x", sfx.Bits) } index := new(big.Int).SetBytes(sfx.Path) index = index.Lsh(index, uint(hasher.BitLen()-len(index.Bytes()))) @@ -59,7 +59,8 @@ func populateMapSubtreeNodes(treeID int64, hasher hashers.MapHasher) storage.Pop hs2 := merkle.NewHStar2(treeID, hasher) root, err := hs2.HStar2Nodes(st.Prefix, int(st.Depth), leaves, nil, func(depth int, index *big.Int, h []byte) error { - if depth == 0 && len(st.Prefix) > 0 { + //if depth == 0 && len(st.Prefix) > 0 { + if depth == len(st.Prefix)*8 { // no space for the root in the node cache return nil } diff --git a/storage/cache/subtree_cache.go b/storage/cache/subtree_cache.go index b066958ca5..37d5985506 100644 --- a/storage/cache/subtree_cache.go +++ b/storage/cache/subtree_cache.go @@ -340,7 +340,7 @@ func (s *SubtreeCache) SetNodeHash(id storage.NodeID, h []byte, getSubtree GetSu if err != nil { glog.Errorf("base64.DecodeString(%v): %v", sfxKey, err) } - glog.Infof("SetNodeHash(pfx: %s, sfx: %x): %x", prefixKey, b, 
h) + glog.Infof("SetNodeHash(pfx: %x, sfx: %x): %x", prefixKey, b, h) } return nil } From 11e8de15abe37ac1aad002410f4ed540b3796e2e Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Fri, 28 Jul 2017 15:17:51 +0100 Subject: [PATCH 06/12] NewNodeIDFromPrefixSuffix --- integration/maptest/map_test.go | 8 ++--- storage/cache/map_subtree_cache.go | 5 ++- storage/types.go | 19 +++++++++++ storage/types_test.go | 52 +++++++++++++++++++----------- 4 files changed, 58 insertions(+), 26 deletions(-) diff --git a/integration/maptest/map_test.go b/integration/maptest/map_test.go index fe83487f97..41542c291a 100644 --- a/integration/maptest/map_test.go +++ b/integration/maptest/map_test.go @@ -232,15 +232,15 @@ func TestInclusion(t *testing.T) { }, }, { - desc: "CONIKS single", + desc: "CONIKS across subtrees", HashStrategy: trillian.HashStrategy_CONIKS_SHA512_256, leaves: []*trillian.MapLeaf{ - {Index: h2b("0000000000000000000000000000000000000000000000000000000000000000"), LeafValue: []byte("A")}, + {Index: h2b("0000000000000180000000000000000000000000000000000000000000000000"), LeafValue: []byte("Z")}, }, }, { - desc: "maphasher multi", - HashStrategy: trillian.HashStrategy_TEST_MAP_HASHER, + desc: "CONIKS multi", + HashStrategy: trillian.HashStrategy_CONIKS_SHA512_256, leaves: []*trillian.MapLeaf{ {Index: h2b("0000000000000000000000000000000000000000000000000000000000000000"), LeafValue: []byte("A")}, {Index: h2b("0000000000000000000000000000000000000000000000000000000000000001"), LeafValue: []byte("B")}, diff --git a/storage/cache/map_subtree_cache.go b/storage/cache/map_subtree_cache.go index 58b2853fde..b0352cf605 100644 --- a/storage/cache/map_subtree_cache.go +++ b/storage/cache/map_subtree_cache.go @@ -49,10 +49,9 @@ func populateMapSubtreeNodes(treeID int64, hasher hashers.MapHasher) storage.Pop if sfx.Bits%depthQuantum != 0 { return fmt.Errorf("unexpected non-leaf suffix found: %x", sfx.Bits) } - index := new(big.Int).SetBytes(sfx.Path) - index = index.Lsh(index, 
uint(hasher.BitLen()-len(index.Bytes()))) + leaves = append(leaves, merkle.HStar2LeafHash{ - Index: index, + Index: storage.NewNodeIDFromPrefixSuffix(st.Prefix, sfx, hasher.BitLen()).BigInt(), LeafHash: v, }) } diff --git a/storage/types.go b/storage/types.go index ee08149dd5..b0f13b9751 100644 --- a/storage/types.go +++ b/storage/types.go @@ -166,6 +166,8 @@ func NewNodeIDFromBigInt(depth int, index *big.Int, totalDepth int) NodeID { copy(path[unusedHighBytes:], index.Bytes()) // TODO(gdbelvin): consider masking off insignificant bits past depth. + glog.V(5).Infof("NewNodeIDFromBigInt(%v, %x, %v): %v, %x", + depth, index.Bytes(), totalDepth, depth, path) return NodeID{ Path: path, @@ -173,6 +175,11 @@ func NewNodeIDFromBigInt(depth int, index *big.Int, totalDepth int) NodeID { } } +// BigInt returns the big.Int for this node. +func (n NodeID) BigInt() *big.Int { + return new(big.Int).SetBytes(n.Path) +} + // NewNodeIDWithPrefix creates a new NodeID of nodeIDLen bits with the prefixLen MSBs set to prefix. // NewNodeIDWithPrefix places the lower prefixLenBits of prefix in the most significant bits of path. // Path will have enough bytes to hold maxLenBits @@ -337,6 +344,18 @@ func (n *NodeID) Siblings() []NodeID { return sibs } +// NewNodeIDFromPrefixSuffix undoes Split() and returns the NodeID. 
+func NewNodeIDFromPrefixSuffix(prefix []byte, suffix Suffix, maxPathBits int) NodeID { + path := make([]byte, maxPathBits/8) + copy(path, prefix) + copy(path[len(prefix):], suffix.Path) + + return NodeID{ + Path: path, + PrefixLenBits: len(prefix)*8 + int(suffix.Bits), + } +} + // Split splits a NodeID into a prefix and a suffix at prefixSplit func (n *NodeID) Split(prefixBytes, suffixBits int) ([]byte, Suffix) { if n.PrefixLenBits == 0 { diff --git a/storage/types_test.go b/storage/types_test.go index 5fd0c16f59..fd00999575 100644 --- a/storage/types_test.go +++ b/storage/types_test.go @@ -90,27 +90,28 @@ func TestSplit(t *testing.T) { outPrefix []byte outSuffixBits int outSuffix []byte + unusedBytes int }{ - {h2b("1234567f"), 32, 3, 8, h2b("123456"), 8, h2b("7f")}, - {h2b("123456ff"), 29, 3, 8, h2b("123456"), 5, h2b("f8")}, - {h2b("123456ff"), 25, 3, 8, h2b("123456"), 1, h2b("80")}, - {h2b("12345678"), 16, 1, 8, h2b("12"), 8, h2b("34")}, - {h2b("12345678"), 9, 1, 8, h2b("12"), 1, h2b("00")}, - {h2b("12345678"), 8, 0, 8, h2b(""), 8, h2b("12")}, - {h2b("12345678"), 7, 0, 8, h2b(""), 7, h2b("12")}, - {h2b("12345678"), 0, 0, 8, h2b(""), 0, h2b("00")}, - {h2b("70"), 2, 0, 8, h2b(""), 2, h2b("40")}, - {h2b("70"), 3, 0, 8, h2b(""), 3, h2b("60")}, - {h2b("70"), 4, 0, 8, h2b(""), 4, h2b("70")}, - {h2b("70"), 5, 0, 8, h2b(""), 5, h2b("70")}, - {h2b("0003"), 16, 1, 8, h2b("00"), 8, h2b("03")}, - {h2b("0003"), 15, 1, 8, h2b("00"), 7, h2b("02")}, - {h2b("0001000000000000"), 16, 1, 8, h2b("00"), 8, h2b("01")}, - {h2b("0100000000000000"), 8, 0, 8, h2b(""), 8, h2b("01")}, + {h2b("1234567f"), 32, 3, 8, h2b("123456"), 8, h2b("7f"), 0}, + {h2b("123456ff"), 29, 3, 8, h2b("123456"), 5, h2b("f8"), 0}, + {h2b("123456ff"), 25, 3, 8, h2b("123456"), 1, h2b("80"), 0}, + {h2b("12345678"), 16, 1, 8, h2b("12"), 8, h2b("34"), 2}, + {h2b("12345678"), 9, 1, 8, h2b("12"), 1, h2b("00"), 2}, + {h2b("12345678"), 8, 0, 8, h2b(""), 8, h2b("12"), 3}, + {h2b("12345678"), 7, 0, 8, h2b(""), 7, h2b("12"), 
3}, + {h2b("12345678"), 0, 0, 8, h2b(""), 0, h2b("00"), 3}, + {h2b("70"), 2, 0, 8, h2b(""), 2, h2b("40"), 0}, + {h2b("70"), 3, 0, 8, h2b(""), 3, h2b("60"), 0}, + {h2b("70"), 4, 0, 8, h2b(""), 4, h2b("70"), 0}, + {h2b("70"), 5, 0, 8, h2b(""), 5, h2b("70"), 0}, + {h2b("0003"), 16, 1, 8, h2b("00"), 8, h2b("03"), 0}, + {h2b("0003"), 15, 1, 8, h2b("00"), 7, h2b("02"), 0}, + {h2b("0001000000000000"), 16, 1, 8, h2b("00"), 8, h2b("01"), 6}, + {h2b("0100000000000000"), 8, 0, 8, h2b(""), 8, h2b("01"), 7}, // Map subtree scenarios - {h2b("0100000000000000"), 16, 0, 16, h2b(""), 16, h2b("0100")}, - {h2b("0100000000000000"), 32, 0, 32, h2b(""), 32, h2b("01000000")}, - {h2b("0000000000000000000000000000000000000000000000000000000000000001"), 256, 10, 176, h2b("00000000000000000000"), 176, h2b("00000000000000000000000000000000000000000001")}, + {h2b("0100000000000000"), 16, 0, 16, h2b(""), 16, h2b("0100"), 6}, + {h2b("0100000000000000"), 32, 0, 32, h2b(""), 32, h2b("01000000"), 4}, + {h2b("0000000000000000000000000000000000000000000000000000000000000001"), 256, 10, 176, h2b("00000000000000000000"), 176, h2b("00000000000000000000000000000000000000000001"), 0}, } { n := NewNodeIDFromHash(tc.inPath) n.PrefixLenBits = tc.inPathLenBits @@ -129,6 +130,19 @@ func TestSplit(t *testing.T) { if got, want := s.Path, tc.outSuffix; !bytes.Equal(got, want) { t.Errorf("%d, %x.Split(%v, %v).Path: %x, want %x", tc.inPathLenBits, tc.inPath, tc.splitBytes, tc.suffixBits, got, want) + continue + } + + newNode := NewNodeIDFromPrefixSuffix(p, s, len(tc.inPath)*8) + want := []byte{} + want = append(want, tc.outPrefix...) + want = append(want, tc.outSuffix...) + want = append(want, make([]byte, tc.unusedBytes)...) 
+ if got, want := newNode.Path, want; !bytes.Equal(got, want) { + t.Errorf("NewNodeIDFromPrefix(%x, %v).Path: %x, want %x", p, s, got, want) + } + if got, want := newNode.PrefixLenBits, n.PrefixLenBits; got != want { + t.Errorf("NewNodeIDFromPrefix(%x, %v).PrefixLenBits: %x, want %x", p, s, got, want) } } } From c64bbdf8cd341883982f68c9ecd6b15ae9140c47 Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Fri, 28 Jul 2017 15:25:58 +0100 Subject: [PATCH 07/12] Remove NewNodeIDFromRelativeBigInt --- storage/types.go | 22 ---------------------- storage/types_test.go | 33 --------------------------------- 2 files changed, 55 deletions(-) diff --git a/storage/types.go b/storage/types.go index b0f13b9751..da27d1076f 100644 --- a/storage/types.go +++ b/storage/types.go @@ -130,28 +130,6 @@ func NewNodeIDFromPrefix(prefix []byte, depth int, index int64, subDepth, totalD } } -// NewNodeIDFromRelativeBigInt returns a NodeID given by a subtree and a subtree index. -// depth is the number of levels down from the top of the subtree -// subIndex is the path from the root of the subtree to the desired node, and continuing down to the bottom of the subtree. -// subIndex = horizontal index << height. -func NewNodeIDFromRelativeBigInt(prefix []byte, subtreeDepth, depth int, subIndex *big.Int, totalDepth int) NodeID { - // Put prefix in the MSB bits of path. - path := make([]byte, totalDepth/8) - copy(path, prefix) - - // Copy subIndex into subPath, right justified. - subPath := path[len(prefix) : len(prefix)+subtreeDepth/8] - unusedSubBytes := len(subPath) - len(subIndex.Bytes()) - copy(subPath[unusedSubBytes:], subIndex.Bytes()) - - glog.V(5).Infof("NewNodeIDFromRelativeBigInt({%x, %v}, %v, %x, %v): %v, %x", - prefix, subtreeDepth, depth, subIndex.Bytes(), totalDepth, len(prefix)*8+depth, path) - return NodeID{ - Path: path, - PrefixLenBits: len(prefix)*8 + depth, - } -} - // NewNodeIDFromBigInt returns a NodeID of a big.Int with no prefix. 
// index contains the path's least significant bits. // depth indicates the number of bits from the most significant bit to treat as part of the path. diff --git a/storage/types_test.go b/storage/types_test.go index fd00999575..b7bdba462f 100644 --- a/storage/types_test.go +++ b/storage/types_test.go @@ -147,39 +147,6 @@ func TestSplit(t *testing.T) { } } -func TestNewNodeIDFromRelativeBigInt(t *testing.T) { - for _, tc := range []struct { - prefix []byte - depth int - index int64 - subDepth int - totalDepth int - wantPath []byte - wantDepth int - }{ - {prefix: h2b(""), depth: 8, index: 0, subDepth: 8, totalDepth: 64, wantPath: h2b("0000000000000000"), wantDepth: 8}, - {prefix: h2b(""), depth: 8, index: 1, subDepth: 8, totalDepth: 64, wantPath: h2b("0100000000000000"), wantDepth: 8}, - {prefix: h2b("00"), depth: 7, index: 1, subDepth: 8, totalDepth: 64, wantPath: h2b("0001000000000000"), wantDepth: 15}, - {prefix: h2b("00"), depth: 8, index: 1, subDepth: 8, totalDepth: 64, wantPath: h2b("0001000000000000"), wantDepth: 16}, - {prefix: h2b("00"), depth: 16, index: 257, subDepth: 16, totalDepth: 64, wantPath: h2b("0001010000000000"), wantDepth: 24}, - {prefix: h2b("12345678"), depth: 8, index: 1, subDepth: 8, totalDepth: 64, wantPath: h2b("1234567801000000"), wantDepth: 40}, - - {prefix: h2b("00"), subDepth: 248, depth: 247, index: 1, totalDepth: 256, wantPath: h2b("0000000000000000000000000000000000000000000000000000000000000001"), wantDepth: 255}, - {prefix: h2b("00000000000000000000"), subDepth: 176, depth: 176, index: 1, totalDepth: 256, wantPath: h2b("0000000000000000000000000000000000000000000000000000000000000001"), wantDepth: 256}, - } { - i := big.NewInt(tc.index) - n := NewNodeIDFromRelativeBigInt(tc.prefix, tc.subDepth, tc.depth, i, tc.totalDepth) - if got, want := n.Path, tc.wantPath; !bytes.Equal(got, want) { - t.Errorf("NewNodeIDFromRelativeBigInt(%x, %v, %v, %v, %v).Path: %x, want %x", - tc.prefix, tc.depth, tc.index, tc.subDepth, tc.totalDepth, got, 
want) - } - if got, want := n.PrefixLenBits, tc.wantDepth; got != want { - t.Errorf("NewNodeIDFromRelativeBigInt(%x, %v, %v, %v, %v).Depth: %v, want %v", - tc.prefix, tc.depth, tc.index, tc.subDepth, tc.totalDepth, got, want) - } - } -} - func TestNewNodeIDFromPrefix(t *testing.T) { for _, tc := range []struct { prefix []byte From dc2adc46318f1dae95005deaa0ba79d3f7ec41d7 Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Fri, 28 Jul 2017 16:19:02 +0100 Subject: [PATCH 08/12] Use NewFromPrefixSuffix --- integration/maptest/map_test.go | 2 +- merkle/hstar2.go | 30 ++++++++++-------------------- merkle/hstar2_test.go | 26 +++----------------------- merkle/sparse_merkle_tree.go | 12 ++++++------ merkle/sparse_merkle_tree_test.go | 12 ++++-------- 5 files changed, 24 insertions(+), 58 deletions(-) diff --git a/integration/maptest/map_test.go b/integration/maptest/map_test.go index 41542c291a..8089a0af3e 100644 --- a/integration/maptest/map_test.go +++ b/integration/maptest/map_test.go @@ -303,7 +303,7 @@ func TestInclusionBatch(t *testing.T) { { desc: "maphasher batch", HashStrategy: trillian.HashStrategy_TEST_MAP_HASHER, - batchSize: 1, numBatches: 1, + batchSize: 64, numBatches: 32, }, // TODO(gdbelvin): investigate batches of size > 150. // We are currently getting DB connection starvation: Too many connections. diff --git a/merkle/hstar2.go b/merkle/hstar2.go index 728445e910..96ec2829f0 100644 --- a/merkle/hstar2.go +++ b/merkle/hstar2.go @@ -22,13 +22,14 @@ import ( "github.com/golang/glog" "github.com/google/trillian/merkle/hashers" + "github.com/google/trillian/storage" ) var ( - // ErrNegativeTreeLevelOffset indicates a negative level was specified. - ErrNegativeTreeLevelOffset = errors.New("treeLevelOffset cannot be negative") - smtOne = big.NewInt(1) - smtZero = big.NewInt(0) + // ErrSubtreeOverrun indicates that a subtree exceeds the maximum tree depth. 
+ ErrSubtreeOverrun = errors.New("subtree with prefix exceeds maximum tree size") + smtOne = big.NewInt(1) + smtZero = big.NewInt(0) ) // HStar2LeafHash represents a leaf for the HStar2 sparse Merkle tree @@ -85,11 +86,10 @@ func (s *HStar2) HStar2Nodes(prefix []byte, subtreeDepth int, values []HStar2Lea depth := len(prefix) * 8 totalDepth := depth + subtreeDepth if totalDepth > s.hasher.BitLen() { - return nil, ErrNegativeTreeLevelOffset + return nil, ErrSubtreeOverrun } sort.Sort(ByIndex{values}) - offset := new(big.Int).SetBytes(prefix) - offset = offset.Lsh(offset, uint(s.hasher.BitLen()-depth)) // shift prefix into place. + offset := storage.NewNodeIDFromPrefixSuffix(prefix, storage.Suffix{}, s.hasher.BitLen()).BigInt() return s.hStar2b(depth, totalDepth, values, offset, get, set) } @@ -140,10 +140,10 @@ func (s *HStar2) get(index *big.Int, depth int, getter SparseGetNodeFunc) ([]byt return h, nil } } + // TODO(gdbelvin): Hashers should accept depth as their main argument. height := s.hasher.BitLen() - depth - indexBytes := PaddedBytes(index, s.hasher.Size()) - indexBytes = MaskIndex(indexBytes, depth) - return s.hasher.HashEmpty(s.treeID, indexBytes, height), nil + nodeID := storage.NewNodeIDFromBigInt(index.BitLen(), index, s.hasher.BitLen()) + return s.hasher.HashEmpty(s.treeID, nodeID.Path, height), nil } // set attempts to use setter if it not nil. @@ -170,13 +170,3 @@ type ByIndex struct{ Leaves } // Less returns true if i.Index < j.Index func (s ByIndex) Less(i, j int) bool { return s.Leaves[i].Index.Cmp(s.Leaves[j].Index) < 0 } - -// PaddedBytes takes a big.Int and returns it's value, left padded with zeros. -// e.g. 
1 -> 0000000000000000000000000000000000000001 -func PaddedBytes(i *big.Int, size int) []byte { - b := i.Bytes() - ret := make([]byte, size) - padBytes := len(ret) - len(b) - copy(ret[padBytes:], b) - return ret -} diff --git a/merkle/hstar2_test.go b/merkle/hstar2_test.go index c92e6c2ffc..626c17d5a4 100644 --- a/merkle/hstar2_test.go +++ b/merkle/hstar2_test.go @@ -22,6 +22,7 @@ import ( "github.com/google/trillian/merkle/hashers" "github.com/google/trillian/merkle/maphasher" + "github.com/google/trillian/storage" "github.com/google/trillian/testonly" ) @@ -145,10 +146,8 @@ func rootsForTrimmedKeys(t *testing.T, prefixSize int, lh []HStar2LeafHash) []HS t.Fatalf("Failed to calculate root %v", err) } - index := new(big.Int).SetBytes(prefix) - index = index.Lsh(index, uint(hasher.BitLen()-prefixSize)) ret = append(ret, HStar2LeafHash{ - Index: index, + Index: storage.NewNodeIDFromPrefixSuffix(prefix, storage.Suffix{}, hasher.BitLen()).BigInt(), LeafHash: root, }) } @@ -178,9 +177,7 @@ func TestHStar2OffsetRootKAT(t *testing.T) { if got, want := root, x.root; !bytes.Equal(got, want) { t.Errorf("HStar2Nodes(i: %v, size:%v): %x, want: %x", i, size, got, want) } - break } - break } } @@ -188,24 +185,7 @@ func TestHStar2NegativeTreeLevelOffset(t *testing.T) { s := NewHStar2(treeID, maphasher.Default) _, err := s.HStar2Nodes(make([]byte, 31), 9, []HStar2LeafHash{}, nil, nil) - if got, want := err, ErrNegativeTreeLevelOffset; got != want { + if got, want := err, ErrSubtreeOverrun; got != want { t.Fatalf("Hstar2Nodes(): %v, want %v", got, want) } } - -func TestPaddedBytes(t *testing.T) { - size := 160 / 8 - for _, tc := range []struct { - i *big.Int - want []byte - }{ - {i: big.NewInt(0), want: h2b("0000000000000000000000000000000000000000")}, - {i: big.NewInt(1), want: h2b("0000000000000000000000000000000000000001")}, - {i: new(big.Int).SetBytes(h2b("00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0F")), want: h2b("00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0F")}, - {i: 
new(big.Int).SetBytes(h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0F")), want: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0F")}, - } { - if got, want := PaddedBytes(tc.i, size), tc.want; !bytes.Equal(got, want) { - t.Errorf("PaddedBytes(%d): %x, want %x", tc.i, got, want) - } - } -} diff --git a/merkle/sparse_merkle_tree.go b/merkle/sparse_merkle_tree.go index 2e8383928d..a3209d0052 100644 --- a/merkle/sparse_merkle_tree.go +++ b/merkle/sparse_merkle_tree.go @@ -212,15 +212,15 @@ func (s *subtreeWriter) buildSubtree(ctx context.Context) { s.root <- rootHashOrError{hash: nil, err: err} return } - index := new(big.Int).SetBytes(ih.index) - index = index.Lsh(index, uint(s.hasher.BitLen()-len(ih.index)*8)) + nodeID := storage.NewNodeIDFromPrefixSuffix(ih.index, storage.Suffix{}, s.hasher.BitLen()) + leaves = append(leaves, HStar2LeafHash{ - Index: index, + Index: nodeID.BigInt(), LeafHash: ih.hash, }) nodesToStore = append(nodesToStore, storage.Node{ - NodeID: storage.NewNodeIDFromHash(ih.index), + NodeID: nodeID, Hash: ih.hash, NodeRevision: s.treeRevision, }) @@ -397,10 +397,10 @@ func (s SparseMerkleTreeReader) InclusionProof(ctx context.Context, rev int64, i } nodeMap := make(map[string]*storage.Node) - log.Printf("Got Nodes: ") + glog.Infof("Got Nodes: ") for _, n := range nodes { n := n // need this or we'll end up with the same node hash repeated in the map - log.Printf(" %x, %d: %x", n.NodeID.Path, len(n.NodeID.String()), n.Hash) + glog.Infof(" %x, %d: %x", n.NodeID.Path, len(n.NodeID.String()), n.Hash) nodeMap[n.NodeID.String()] = &n } diff --git a/merkle/sparse_merkle_tree_test.go b/merkle/sparse_merkle_tree_test.go index 14286431a4..2262907e6e 100644 --- a/merkle/sparse_merkle_tree_test.go +++ b/merkle/sparse_merkle_tree_test.go @@ -437,14 +437,10 @@ func testSparseTreeFetches(ctx context.Context, t *testing.T, vec sparseTestVect id := sibs[j].String() pathNode := nodeID.String()[:len(id)] if _, ok := reads[pathNode]; ok { - // we're modifying both children 
of a - // node because two keys are - // intersecting, since both will be - // recalculated neither will be read - // from storage so we remove the - // previously set expectation for this - // node's sibling, and skip adding one - // for this node: + // we're modifying both children of a node because two keys are + // intersecting, since both will be recalculated neither will be read + // from storage so we remove the previously set expectation for this + // node's sibling, and skip adding one for this node: delete(reads, pathNode) continue } From f025f06d3eb612118e2cae73b107160dd8674fbb Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Mon, 31 Jul 2017 14:26:58 +0100 Subject: [PATCH 09/12] Define short and long tests --- integration/maptest/map_test.go | 12 ++++++++++++ scripts/presubmit.sh | 2 +- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/integration/maptest/map_test.go b/integration/maptest/map_test.go index 8089a0af3e..ffb9c7582c 100644 --- a/integration/maptest/map_test.go +++ b/integration/maptest/map_test.go @@ -298,16 +298,28 @@ func TestInclusionBatch(t *testing.T) { desc string HashStrategy trillian.HashStrategy batchSize, numBatches int + large bool }{ + { + desc: "maphasher short batch", + HashStrategy: trillian.HashStrategy_TEST_MAP_HASHER, + batchSize: 10, numBatches: 10, + large: false, + }, { desc: "maphasher batch", HashStrategy: trillian.HashStrategy_TEST_MAP_HASHER, batchSize: 64, numBatches: 32, + large: true, }, // TODO(gdbelvin): investigate batches of size > 150. // We are currently getting DB connection starvation: Too many connections. } { + if testing.Short() && tc.large { + t.Logf("testing.Short() is true. 
Skipping %v", tc.desc) + continue + } tree, _, err := newTreeWithHasher(ctx, env, tc.HashStrategy) if err != nil { t.Errorf("%v: newTreeWithHasher(%v): %v", tc.desc, tc.HashStrategy, err) diff --git a/scripts/presubmit.sh b/scripts/presubmit.sh index 589d3132b7..b547329350 100755 --- a/scripts/presubmit.sh +++ b/scripts/presubmit.sh @@ -93,7 +93,7 @@ main() { go build ${go_dirs} echo 'running go test' - go test -cover -timeout=5m ${goflags} ${go_dirs} + go test -cover -timeout=5m -short ${goflags} ${go_dirs} fi if [[ "${run_linters}" -eq 1 ]]; then From 6bd4d893d87e73ec7503e9f3a5015596bacc872f Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Wed, 2 Aug 2017 22:02:54 +0100 Subject: [PATCH 10/12] remove extra logging --- merkle/sparse_merkle_tree.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/merkle/sparse_merkle_tree.go b/merkle/sparse_merkle_tree.go index a3209d0052..704f1bd52b 100644 --- a/merkle/sparse_merkle_tree.go +++ b/merkle/sparse_merkle_tree.go @@ -19,7 +19,6 @@ import ( "context" "errors" "fmt" - "log" "math/big" "sync" @@ -387,10 +386,6 @@ func (s SparseMerkleTreeReader) RootAtRevision(ctx context.Context, rev int64) ( func (s SparseMerkleTreeReader) InclusionProof(ctx context.Context, rev int64, index []byte) ([][]byte, error) { nid := storage.NewNodeIDFromHash(index) sibs := nid.Siblings() - log.Printf("Siblings: ") - for _, s := range sibs { - log.Printf(" %x", s.Path) - } nodes, err := s.tx.GetMerkleNodes(ctx, rev, sibs) if err != nil { return nil, err From 57d04a7012c9691bface444d0871299cb99f7101 Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Thu, 3 Aug 2017 17:27:36 +0100 Subject: [PATCH 11/12] reapply subtree cache fix for roots --- storage/cache/map_subtree_cache.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/storage/cache/map_subtree_cache.go b/storage/cache/map_subtree_cache.go index b0352cf605..c691f2e649 100644 --- a/storage/cache/map_subtree_cache.go +++ b/storage/cache/map_subtree_cache.go @@ -58,8 +58,7 
@@ func populateMapSubtreeNodes(treeID int64, hasher hashers.MapHasher) storage.Pop hs2 := merkle.NewHStar2(treeID, hasher) root, err := hs2.HStar2Nodes(st.Prefix, int(st.Depth), leaves, nil, func(depth int, index *big.Int, h []byte) error { - //if depth == 0 && len(st.Prefix) > 0 { - if depth == len(st.Prefix)*8 { + if depth == len(st.Prefix)*8 && len(st.Prefix) > 0 { // no space for the root in the node cache return nil } From f2c5b3783c57f9262df829583ce9ec4689bd1cc4 Mon Sep 17 00:00:00 2001 From: Gary Belvin Date: Thu, 3 Aug 2017 18:21:25 +0100 Subject: [PATCH 12/12] Add error tests to ParseSuffix, reviewer comments --- integration/maptest/map_test.go | 1 + merkle/hstar2_test.go | 2 +- storage/suffix_test.go | 35 ++++++++++++++++++++++++++++++++- 3 files changed, 36 insertions(+), 2 deletions(-) diff --git a/integration/maptest/map_test.go b/integration/maptest/map_test.go index ffb9c7582c..860d643e1a 100644 --- a/integration/maptest/map_test.go +++ b/integration/maptest/map_test.go @@ -245,6 +245,7 @@ func TestInclusion(t *testing.T) { {Index: h2b("0000000000000000000000000000000000000000000000000000000000000000"), LeafValue: []byte("A")}, {Index: h2b("0000000000000000000000000000000000000000000000000000000000000001"), LeafValue: []byte("B")}, {Index: h2b("0000000000000000000000000000000000000000000000000000000000000002"), LeafValue: []byte("C")}, + {Index: h2b("0000000000000000000000000000000000000000000000000000000000000003"), LeafValue: nil}, }, }, } { diff --git a/merkle/hstar2_test.go b/merkle/hstar2_test.go index 626c17d5a4..df2b6dc030 100644 --- a/merkle/hstar2_test.go +++ b/merkle/hstar2_test.go @@ -136,7 +136,7 @@ func rootsForTrimmedKeys(t *testing.T, prefixSize int, lh []HStar2LeafHash) []HS for i := range lh { subtreeDepth := s.hasher.BitLen() - prefixSize prefix := lh[i].Index.Bytes() - // ensure we've got any chopped off leading zero bytes + // Left pad prefix with zeros back out to 32 bytes. 
for len(prefix) < 32 { prefix = append([]byte{0}, prefix...) } diff --git a/storage/suffix_test.go b/storage/suffix_test.go index d637488a4b..1881c6b1a2 100644 --- a/storage/suffix_test.go +++ b/storage/suffix_test.go @@ -29,7 +29,40 @@ const ( // storage/cache when merkle no longer depends on storage.NodeID ) +// h2b6 takes a hex string and emits a base64 string. +func h2b6(h string) string { + return base64.StdEncoding.EncodeToString(h2b(h)) +} + func TestParseSuffix(t *testing.T) { + for _, tc := range []struct { + suffix string + wantBits byte + wantPath []byte + wantErr bool + }{ + {h2b6("0100"), 1, h2b("00"), false}, + {h2b6("0801"), 8, h2b("01"), false}, + {"----", 1, h2b("00"), true}, + } { + sfx, err := ParseSuffix(tc.suffix) + if got, want := err != nil, tc.wantErr; got != want { + t.Errorf("ParseSuffix(%s): %v, wantErr: %v", tc.suffix, err, want) + continue + } + if err != nil { + continue + } + if got, want := sfx.Bits, tc.wantBits; got != want { + t.Errorf("ParseSuffix(%s).Bits: %v, want %v", tc.suffix, got, want) + } + if got, want := sfx.Path, tc.wantPath; !bytes.Equal(got, want) { + t.Errorf("ParseSuffix(%s).Path: %x, want %x", tc.suffix, got, want) + } + } +} + +func TestSplitParseSuffixRoundtrip(t *testing.T) { for _, tc := range []struct { prefix []byte leafIndex int64 @@ -51,7 +84,7 @@ func TestParseSuffix(t *testing.T) { t.Errorf("ParseSuffix(%s).Bits: %v, want %v", sfxKey, got, want) } if got, want := sfx.Path, sfxP.Path; !bytes.Equal(got, want) { - t.Errorf("ParseSuffix(%s).Bits: %x, want %x", sfxKey, got, want) + t.Errorf("ParseSuffix(%s).Path: %x, want %x", sfxKey, got, want) } } }