diff --git a/crypto/keys/mock_keys.go b/crypto/keys/mock_keys.go index e5c4ff211c..2b92906740 100644 --- a/crypto/keys/mock_keys.go +++ b/crypto/keys/mock_keys.go @@ -6,10 +6,11 @@ package keys import ( context "context" crypto "crypto" + reflect "reflect" + gomock "github.com/golang/mock/gomock" proto "github.com/golang/protobuf/proto" keyspb "github.com/google/trillian/crypto/keyspb" - reflect "reflect" ) // MockSignerFactory is a mock of SignerFactory interface diff --git a/integration/maptest/map_test.go b/integration/maptest/map_test.go index 7927602a11..688e5a52d6 100644 --- a/integration/maptest/map_test.go +++ b/integration/maptest/map_test.go @@ -88,7 +88,7 @@ func verifyGetMapLeavesResponse(getResp *trillian.GetMapLeavesResponse, indexes leafHash := incl.GetLeaf().GetLeafHash() proof := incl.GetInclusion() - if got, want := leafHash, hasher.HashLeaf(treeID, index, hasher.BitLen(), leaf); !bytes.Equal(got, want) { + if got, want := leafHash, hasher.HashLeaf(treeID, index, 0, leaf); !bytes.Equal(got, want) { return fmt.Errorf("HashLeaf(%s): %x, want %x", leaf, got, want) } if err := merkle.VerifyMapInclusionProof(treeID, index, @@ -140,6 +140,22 @@ func TestInclusion(t *testing.T) { {Index: h2b("0000000000000000000000000000000000000000000000000000000000000002"), LeafValue: []byte("C")}, }, }, + { + desc: "CONIKS across subtrees", + HashStrategy: trillian.HashStrategy_CONIKS_SHA512_256, + leaves: []*trillian.MapLeaf{ + {Index: h2b("0000000000000180000000000000000000000000000000000000000000000000"), LeafValue: []byte("Z")}, + }, + }, + { + desc: "CONIKS multi", + HashStrategy: trillian.HashStrategy_CONIKS_SHA512_256, + leaves: []*trillian.MapLeaf{ + {Index: h2b("0000000000000000000000000000000000000000000000000000000000000000"), LeafValue: []byte("A")}, + {Index: h2b("0000000000000000000000000000000000000000000000000000000000000001"), LeafValue: []byte("B")}, + {Index: h2b("0000000000000000000000000000000000000000000000000000000000000002"), LeafValue: 
[]byte("C")}, + }, + }, } { tree, hasher, err := newTreeWithHasher(ctx, env, tc.HashStrategy) if err != nil { @@ -192,6 +208,7 @@ func TestInclusionBatch(t *testing.T) { HashStrategy trillian.HashStrategy batchSize, numBatches int }{ + { desc: "maphasher batch", HashStrategy: trillian.HashStrategy_TEST_MAP_HASHER, @@ -206,7 +223,7 @@ func TestInclusionBatch(t *testing.T) { } if err := RunMapBatchTest(ctx, env, tree, tc.batchSize, tc.numBatches); err != nil { - t.Errorf("%v: %v", tc.desc, err) + t.Errorf("BatchSize: %v, Batches: %v: %v", tc.batchSize, tc.numBatches, err) } } } @@ -356,8 +373,7 @@ func TestNonExistentLeaf(t *testing.T) { t.Errorf("len(leaf): %v, want, %v", got, want) } - if got, want := leafHash, - hasher.HashLeaf(tree.TreeId, index, hasher.BitLen(), leaf); !bytes.Equal(got, want) { + if got, want := leafHash, hasher.HashLeaf(tree.TreeId, index, 0, leaf); !bytes.Equal(got, want) { t.Errorf("HashLeaf(%s): %x, want %x", leaf, got, want) } if err := merkle.VerifyMapInclusionProof(tree.TreeId, index, diff --git a/log/sequencer.go b/log/sequencer.go index 46e2f0af95..b34ab8e02d 100644 --- a/log/sequencer.go +++ b/log/sequencer.go @@ -30,6 +30,7 @@ import ( "github.com/google/trillian/merkle" "github.com/google/trillian/merkle/hashers" "github.com/google/trillian/monitoring" + "github.com/google/trillian/node" "github.com/google/trillian/quota" "github.com/google/trillian/storage" "github.com/google/trillian/util" @@ -139,7 +140,7 @@ func (s Sequencer) buildMerkleTreeFromStorageAtRoot(ctx context.Context, root tr glog.Warningf("%v: Failed to create nodeID: %v", root.LogId, err) return nil, err } - nodes, err := tx.GetMerkleNodes(ctx, root.TreeRevision, []storage.NodeID{nodeID}) + nodes, err := tx.GetMerkleNodes(ctx, root.TreeRevision, []node.NodeID{nodeID}) if err != nil { glog.Warningf("%v: Failed to get Merkle nodes: %v", root.LogId, err) diff --git a/log/sequencer_test.go b/log/sequencer_test.go index 674ede149e..1272eabf27 100644 --- 
a/log/sequencer_test.go +++ b/log/sequencer_test.go @@ -29,6 +29,7 @@ import ( "github.com/google/trillian/crypto/keys" "github.com/google/trillian/crypto/sigpb" "github.com/google/trillian/merkle/rfc6962" + "github.com/google/trillian/node" "github.com/google/trillian/quota" "github.com/google/trillian/storage" stestonly "github.com/google/trillian/storage/testonly" @@ -59,10 +60,10 @@ var testRoot16 = trillian.SignedLogRoot{ // These will be accepted in either order because of custom sorting in the mock var updatedNodes = []storage.Node{ - {NodeID: storage.NodeID{Path: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, PrefixLenBits: 64}, + {NodeID: node.NodeID{Path: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, PrefixLenBits: 64}, Hash: testonly.MustDecodeBase64("L5Iyd7aFOVewxiRm29xD+EU+jvEo4RfufBijKdflWMk="), NodeRevision: 6}, { - NodeID: storage.NodeID{Path: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, PrefixLenBits: 59}, + NodeID: node.NodeID{Path: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, PrefixLenBits: 59}, Hash: testonly.MustDecodeBase64("R57DrKTGuZdjCNXjv6InGrm4rABLOn9yWpdHmYOoLwU="), NodeRevision: 6}, } diff --git a/merkle/common.go b/merkle/common.go index e2dce5ada1..554c8d72d3 100644 --- a/merkle/common.go +++ b/merkle/common.go @@ -32,3 +32,37 @@ func bit(index []byte, i int) uint { bIndex := (IndexBits - i - 1) / 8 return uint((index[bIndex] >> uint(i%8)) & 0x01) } + +// flipBit returns index with the i'th bit from the right flipped. +func flipBit(index []byte, i int) []byte { + r := make([]byte, len(index)) + copy(r, index) + IndexBits := len(index) * 8 + bIndex := (IndexBits - i - 1) / 8 + r[bIndex] ^= 1 << uint(i%8) + return r +} + +// Neighbor returns index with only the left i bits set and the i'th bit flipped. +func Neighbor(index []byte, i int) []byte { + r := flipBit(index, i) + return MaskIndex(r, len(index)*8-i) +} + +// leftmask contains bitmasks indexed such that the left x bits are set. 
It is +// indexed by byte position from 0-7 0 is special cased to 0xFF since 8 mod 8 +// is 0. leftmask is only used to mask the last byte. +var leftmask = [8]byte{0xFF, 0x80, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC, 0xFE} + +// MaskIndex returns index with only the left depth bits set. +func MaskIndex(index []byte, depth int) []byte { + r := make([]byte, len(index)) + if depth > 0 { + // Copy the first depthBytes. + depthBytes := (depth + 7) >> 3 + copy(r, index[:depthBytes]) + // Mask off unwanted bits in the last byte. + r[depthBytes-1] = r[depthBytes-1] & leftmask[depth%8] + } + return r +} diff --git a/merkle/common_test.go b/merkle/common_test.go index 40dbd21773..7e97574b58 100644 --- a/merkle/common_test.go +++ b/merkle/common_test.go @@ -15,6 +15,7 @@ package merkle import ( + "bytes" "testing" ) @@ -41,3 +42,57 @@ func TestBit(t *testing.T) { } } } + +func TestFlipBit(t *testing.T) { + for _, tc := range []struct { + index []byte + i int + want []byte + }{ + {index: h2b("00"), i: 0, want: h2b("01")}, + {index: h2b("00"), i: 7, want: h2b("80")}, + {index: h2b("000b"), i: 0, want: h2b("000a")}, + {index: h2b("000b"), i: 1, want: h2b("0009")}, + {index: h2b("000b"), i: 2, want: h2b("000f")}, + {index: h2b("000b"), i: 3, want: h2b("0003")}, + {index: h2b("0001"), i: 0, want: h2b("0000")}, + {index: h2b("8000"), i: 15, want: h2b("0000")}, + {index: h2b("0000000000000001"), i: 0, want: h2b("0000000000000000")}, + {index: h2b("0000000000010000"), i: 16, want: h2b("0000000000000000")}, + {index: h2b("8000000000000000"), i: 63, want: h2b("0000000000000000")}, + } { + if got, want := flipBit(tc.index, tc.i), tc.want; !bytes.Equal(got, want) { + t.Errorf("flipBit(%x, %d): %x, want %x", tc.index, tc.i, got, want) + } + } +} + +func TestMaskIndex(t *testing.T) { + for _, tc := range []struct { + index []byte + depth int + want []byte + }{ + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 0, want: h2b("0000000000000000000000000000000000000000")}, + {index: 
h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 1, want: h2b("8000000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 2, want: h2b("C000000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 3, want: h2b("E000000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 4, want: h2b("F000000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 5, want: h2b("F800000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 6, want: h2b("FC00000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 7, want: h2b("FE00000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 8, want: h2b("FF00000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 9, want: h2b("FF80000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 10, want: h2b("FFC0000000000000000000000000000000000000")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 159, want: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFE")}, + {index: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"), depth: 160, want: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")}, + {index: h2b("000102030405060708090A0B0C0D0E0F10111213"), depth: 1, want: h2b("0000000000000000000000000000000000000000")}, + {index: h2b("000102030405060708090A0B0C0D0E0F10111213"), depth: 17, want: h2b("0001000000000000000000000000000000000000")}, + {index: h2b("000102030405060708090A0B0C0D0E0F10111213"), depth: 159, want: h2b("000102030405060708090A0B0C0D0E0F10111212")}, + {index: h2b("000102030405060708090A0B0C0D0E0F10111213"), depth: 160, want: h2b("000102030405060708090A0B0C0D0E0F10111213")}, 
+ } { + if got, want := MaskIndex(tc.index, tc.depth), tc.want; !bytes.Equal(got, want) { + t.Errorf("maskIndex(%x, %v): %x, want %x", tc.index, tc.depth, got, want) + } + } +} diff --git a/merkle/hstar2.go b/merkle/hstar2.go index 1cbfee32ed..313f0bcae1 100644 --- a/merkle/hstar2.go +++ b/merkle/hstar2.go @@ -22,13 +22,15 @@ import ( "github.com/golang/glog" "github.com/google/trillian/merkle/hashers" + "github.com/google/trillian/node" + "github.com/google/trillian/storage" ) var ( - // ErrNegativeTreeLevelOffset indicates a negative level was specified. - ErrNegativeTreeLevelOffset = errors.New("treeLevelOffset cannot be negative") - smtOne = big.NewInt(1) - smtZero = big.NewInt(0) + // ErrSubtreeOverrun indicates that a subtree exceeds the maximum tree depth. + ErrSubtreeOverrun = errors.New("subtree with prefix exceeds maximum tree size") + smtOne = big.NewInt(1) + smtZero = big.NewInt(0) ) // HStar2LeafHash represents a leaf for the HStar2 sparse Merkle tree @@ -54,15 +56,11 @@ func NewHStar2(treeID int64, hasher hashers.MapHasher) HStar2 { } } -// HStar2Root calculates the root of a sparse Merkle tree of depth n which contains -// the given set of non-null leaves. -func (s *HStar2) HStar2Root(n int, values []HStar2LeafHash) ([]byte, error) { +// HStar2Root calculates the root of a sparse Merkle tree of a given depth +// which contains the given set of non-null leaves. +func (s *HStar2) HStar2Root(depth int, values []HStar2LeafHash) ([]byte, error) { sort.Sort(ByIndex{values}) - return s.hStar2b(n, values, smtZero, - func(depth int, index *big.Int) ([]byte, error) { - return s.hasher.HashEmpty(s.treeID, PaddedBytes(index, s.hasher.Size()), depth), nil - }, - func(int, *big.Int, []byte) error { return nil }) + return s.hStar2b(0, depth, values, smtZero, nil, nil) } // SparseGetNodeFunc should return any pre-existing node hash for the node address. 
@@ -76,78 +74,87 @@ type SparseSetNodeFunc func(depth int, index *big.Int, hash []byte) error // internal node values. Values must not contain multiple leaves for the same // index. // -// The treeLevelOffset argument is used when the tree to be calculated is part -// of a larger tree. It identifes the level in the larger tree at which the -// root of the subtree being calculated is found. -// e.g. Imagine a tree 256 levels deep, and that you already (somehow) happen -// to have the intermediate hash values for the non-null nodes 8 levels below -// the root already calculated (i.e. you just need to calculate the top 8 -// levels of a 256-level tree). To do this, you'd set treeDepth=8, and -// treeLevelOffset=248 (256-8). -func (s *HStar2) HStar2Nodes(treeDepth, treeLevelOffset int, values []HStar2LeafHash, get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { +// prefix is the location of this subtree within the larger tree. Root is at nil. +// subtreeDepth is the number of levels in this subtree. 
+func (s *HStar2) HStar2Nodes(prefix []byte, subtreeDepth int, values []HStar2LeafHash, + get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { if glog.V(3) { - glog.Infof("HStar2Nodes(%v, %v, %v)", treeDepth, treeLevelOffset, len(values)) + glog.Infof("HStar2Nodes(%x, %v, %v)", prefix, subtreeDepth, len(values)) for _, v := range values { glog.Infof(" %x: %x", v.Index.Bytes(), v.LeafHash) } } - if treeLevelOffset < 0 { - return nil, ErrNegativeTreeLevelOffset + depth := len(prefix) * 8 + totalDepth := depth + subtreeDepth + if totalDepth > s.hasher.BitLen() { + return nil, ErrSubtreeOverrun } sort.Sort(ByIndex{values}) - return s.hStar2b(treeDepth, values, smtZero, - func(depth int, index *big.Int) ([]byte, error) { - // if we've got a function for getting existing node values, try it: - h, err := get(treeDepth-depth, index) - if err != nil { - return nil, err - } - // if we got a value then we'll use that - if h != nil { - return h, nil - } - // otherwise just return the null hash for this level - return s.hasher.HashEmpty(s.treeID, PaddedBytes(index, s.hasher.Size()), depth+treeLevelOffset), nil - }, - func(depth int, index *big.Int, hash []byte) error { - return set(treeDepth-depth, index, hash) - }) + offset := node.NewNodeIDFromPrefixSuffix(prefix, node.Suffix{}, s.hasher.BitLen()).BigInt() + return s.hStar2b(depth, totalDepth, values, offset, get, set) } -// hStar2b is the recursive implementation for calculating a sparse Merkle tree -// root value. -func (s *HStar2) hStar2b(n int, values []HStar2LeafHash, offset *big.Int, get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { - if n == 0 { +// hStar2b computes a sparse Merkle tree root value recursively. 
+func (s *HStar2) hStar2b(depth, maxDepth int, values []HStar2LeafHash, offset *big.Int, + get SparseGetNodeFunc, set SparseSetNodeFunc) ([]byte, error) { + if depth == maxDepth { switch { case len(values) == 0: - return get(n, offset) - case len(values) != 1: + return s.get(offset, depth, get) + case len(values) == 1: + return values[0].LeafHash, nil + default: return nil, fmt.Errorf("hStar2b base case: len(values): %d, want 1", len(values)) } - return values[0].LeafHash, nil } if len(values) == 0 { - return get(n, offset) + return s.get(offset, depth, get) } - split := new(big.Int).Lsh(smtOne, uint(n-1)) + bitsLeft := s.hasher.BitLen() - depth + split := new(big.Int).Lsh(smtOne, uint(bitsLeft-1)) split.Add(split, offset) i := sort.Search(len(values), func(i int) bool { return values[i].Index.Cmp(split) >= 0 }) - lhs, err := s.hStar2b(n-1, values[:i], offset, get, set) + lhs, err := s.hStar2b(depth+1, maxDepth, values[:i], offset, get, set) if err != nil { return nil, err } - rhs, err := s.hStar2b(n-1, values[i:], split, get, set) + rhs, err := s.hStar2b(depth+1, maxDepth, values[i:], split, get, set) if err != nil { return nil, err } h := s.hasher.HashChildren(lhs, rhs) - if set != nil { - set(n, offset, h) - } + s.set(offset, depth, h, set) return h, nil } +// get attempts to use getter. If getter fails, returns the HashEmpty value. +func (s *HStar2) get(index *big.Int, depth int, getter SparseGetNodeFunc) ([]byte, error) { + // if we've got a function for getting existing node values, try it: + if getter != nil { + h, err := getter(depth, index) + if err != nil { + return nil, err + } + // if we got a value then we'll use that + if h != nil { + return h, nil + } + } + // TODO(gdbelvin): Hashers should accept depth as their main argument. 
+ height := s.hasher.BitLen() - depth + nodeID := storage.NewNodeIDFromBigInt(depth, index, s.hasher.BitLen()) + return s.hasher.HashEmpty(s.treeID, nodeID.Path, height), nil +} + +// set attempts to use setter if it not nil. +func (s *HStar2) set(index *big.Int, depth int, hash []byte, setter SparseSetNodeFunc) error { + if setter != nil { + return setter(depth, index, hash) + } + return nil +} + // HStar2LeafHash sorting boilerplate below. // Leaves is a slice of HStar2LeafHash @@ -164,13 +171,3 @@ type ByIndex struct{ Leaves } // Less returns true if i.Index < j.Index func (s ByIndex) Less(i, j int) bool { return s.Leaves[i].Index.Cmp(s.Leaves[j].Index) < 0 } - -// PaddedBytes takes a big.Int and returns it's value, left padded with zeros. -// e.g. 1 -> 0000000000000000000000000000000000000001 -func PaddedBytes(i *big.Int, size int) []byte { - b := i.Bytes() - ret := make([]byte, size) - padBytes := len(ret) - len(b) - copy(ret[padBytes:], b) - return ret -} diff --git a/merkle/hstar2_test.go b/merkle/hstar2_test.go index 43e1277443..894d2b17f0 100644 --- a/merkle/hstar2_test.go +++ b/merkle/hstar2_test.go @@ -22,6 +22,7 @@ import ( "github.com/google/trillian/merkle/hashers" "github.com/google/trillian/merkle/maphasher" + "github.com/google/trillian/node" "github.com/google/trillian/testonly" ) @@ -87,7 +88,7 @@ func TestHStar2SimpleDataSetKAT(t *testing.T) { continue } if got, want := root, x.root; !bytes.Equal(got, want) { - t.Errorf("Root: \n%x, want:\n%x", got, want) + t.Errorf("Root: %x, want: %x", got, want) } } } @@ -107,7 +108,7 @@ func TestHStar2GetSet(t *testing.T) { if len(values) != 1 { t.Fatalf("Should only have 1 leaf per run, got %d", len(values)) } - root, err := s.HStar2Nodes(s.hasher.BitLen(), 0, values, + root, err := s.HStar2Nodes(nil, s.hasher.BitLen(), values, func(depth int, index *big.Int) ([]byte, error) { return cache[fmt.Sprintf("%x/%d", index, depth)], nil }, @@ -120,7 +121,7 @@ func TestHStar2GetSet(t *testing.T) { continue 
} if got, want := root, x.root; !bytes.Equal(got, want) { - t.Errorf("Root:\n%x, want:\n%x", got, want) + t.Errorf("Root: %x, want: %x", got, want) } } } @@ -130,20 +131,25 @@ func TestHStar2GetSet(t *testing.T) { // 256-prefixSize, and can be passed in as leaves to top-subtree calculation. func rootsForTrimmedKeys(t *testing.T, prefixSize int, lh []HStar2LeafHash) []HStar2LeafHash { var ret []HStar2LeafHash - s := NewHStar2(treeID, maphasher.Default) + hasher := maphasher.Default + s := NewHStar2(treeID, hasher) for i := range lh { - prefix := new(big.Int).Rsh(lh[i].Index, uint(s.hasher.BitLen()-prefixSize)) - b := lh[i].Index.Bytes() - // ensure we've got any chopped of leading zero bytes - for len(b) < 32 { - b = append([]byte{0}, b...) + subtreeDepth := s.hasher.BitLen() - prefixSize + prefix := lh[i].Index.Bytes() + // ensure we've got any chopped off leading zero bytes + for len(prefix) < 32 { + prefix = append([]byte{0}, prefix...) } - lh[i].Index.SetBytes(b[prefixSize/8:]) - root, err := s.HStar2Root(s.hasher.BitLen()-prefixSize, []HStar2LeafHash{lh[i]}) + prefix = prefix[:prefixSize/8] // We only want the first prefixSize bytes. + root, err := s.HStar2Nodes(prefix, subtreeDepth, []HStar2LeafHash{lh[i]}, nil, nil) if err != nil { t.Fatalf("Failed to calculate root %v", err) } - ret = append(ret, HStar2LeafHash{prefix, root}) + + ret = append(ret, HStar2LeafHash{ + Index: node.NewNodeIDFromPrefixSuffix(prefix, node.Suffix{}, hasher.BitLen()).BigInt(), + LeafHash: root, + }) } return ret } @@ -163,15 +169,13 @@ func TestHStar2OffsetRootKAT(t *testing.T) { leaves := createHStar2Leaves(treeID, maphasher.Default, iv...) 
intermediates := rootsForTrimmedKeys(t, size, leaves) - root, err := s.HStar2Nodes(size, s.hasher.BitLen()-size, intermediates, - func(int, *big.Int) ([]byte, error) { return nil, nil }, - func(int, *big.Int, []byte) error { return nil }) + root, err := s.HStar2Nodes(nil, size, intermediates, nil, nil) if err != nil { t.Errorf("Failed to calculate root at iteration %d: %v", i, err) continue } if got, want := root, x.root; !bytes.Equal(got, want) { - t.Errorf("Root: %x, want: %x", got, want) + t.Errorf("HStar2Nodes(i: %v, size:%v): %x, want: %x", i, size, got, want) } } } @@ -180,27 +184,8 @@ func TestHStar2OffsetRootKAT(t *testing.T) { func TestHStar2NegativeTreeLevelOffset(t *testing.T) { s := NewHStar2(treeID, maphasher.Default) - _, err := s.HStar2Nodes(32, -1, []HStar2LeafHash{}, - func(int, *big.Int) ([]byte, error) { return nil, nil }, - func(int, *big.Int, []byte) error { return nil }) - if got, want := err, ErrNegativeTreeLevelOffset; got != want { + _, err := s.HStar2Nodes(make([]byte, 31), 9, []HStar2LeafHash{}, nil, nil) + if got, want := err, ErrSubtreeOverrun; got != want { t.Fatalf("Hstar2Nodes(): %v, want %v", got, want) } } - -func TestPaddedBytes(t *testing.T) { - size := 160 / 8 - for _, tc := range []struct { - i *big.Int - want []byte - }{ - {i: big.NewInt(0), want: h2b("0000000000000000000000000000000000000000")}, - {i: big.NewInt(1), want: h2b("0000000000000000000000000000000000000001")}, - {i: new(big.Int).SetBytes(h2b("00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0F")), want: h2b("00FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0F")}, - {i: new(big.Int).SetBytes(h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0F")), want: h2b("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF0F")}, - } { - if got, want := PaddedBytes(tc.i, size), tc.want; !bytes.Equal(got, want) { - t.Errorf("PaddedBytes(%d): %x, want %x", tc.i, got, want) - } - } -} diff --git a/merkle/map_verifier.go b/merkle/map_verifier.go index f249cf9571..7392e421d9 100644 --- a/merkle/map_verifier.go +++ 
b/merkle/map_verifier.go @@ -48,7 +48,8 @@ func VerifyMapInclusionProof(treeID int64, index, leafHash, expectedRoot []byte, proofIsRightHandElement := bit(index, level) == 0 pElement := proof[level] if len(pElement) == 0 { - pElement = h.HashEmpty(treeID, index, level) + neighborIndex := Neighbor(index, level) + pElement = h.HashEmpty(treeID, neighborIndex, level) } if proofIsRightHandElement { runningHash = h.HashChildren(runningHash, pElement) diff --git a/merkle/merkle_path.go b/merkle/merkle_path.go index 789de15242..9347765001 100644 --- a/merkle/merkle_path.go +++ b/merkle/merkle_path.go @@ -20,6 +20,7 @@ import ( "github.com/golang/glog" terr "github.com/google/trillian/errors" + "github.com/google/trillian/node" "github.com/google/trillian/storage" ) @@ -30,7 +31,7 @@ const vvLevel = 4 // NodeFetch bundles a nodeID with additional information on how to use the node to construct the // correct proof. type NodeFetch struct { - NodeID storage.NodeID + NodeID node.NodeID Rehash bool } @@ -106,20 +107,20 @@ func snapshotConsistency(snapshot1, snapshot2, treeSize int64, maxBitLen int) ([ glog.V(vLevel).Infof("snapshotConsistency: %d -> %d", snapshot1, snapshot2) level := 0 - node := snapshot1 - 1 + index := snapshot1 - 1 // Compute the (compressed) path to the root of snapshot2. // Everything left of 'node' is equal in both trees; no need to record. 
- for (node & 1) != 0 { - glog.V(vvLevel).Infof("Move up: l:%d n:%d", level, node) - node >>= 1 + for (index & 1) != 0 { + glog.V(vvLevel).Infof("Move up: l:%d n:%d", level, index) + index >>= 1 level++ } - if node != 0 { - glog.V(vvLevel).Infof("Not root snapshot1: %d", node) - // Not at the root of snapshot 1, record the node - n, err := storage.NewNodeIDForTreeCoords(int64(level), node, maxBitLen) + if index != 0 { + glog.V(vvLevel).Infof("Not root snapshot1: %d", index) + // Not at the root of snapshot 1, record the index + n, err := node.NewNodeIDForTreeCoords(int64(level), index, maxBitLen) if err != nil { return nil, err } @@ -127,15 +128,15 @@ func snapshotConsistency(snapshot1, snapshot2, treeSize int64, maxBitLen int) ([ } // Now append the path from this node to the root of snapshot2. - p, err := pathFromNodeToRootAtSnapshot(node, level, snapshot2, treeSize, maxBitLen) + p, err := pathFromNodeToRootAtSnapshot(index, level, snapshot2, treeSize, maxBitLen) if err != nil { return nil, err } return append(proof, p...), nil } -func pathFromNodeToRootAtSnapshot(node int64, level int, snapshot, treeSize int64, maxBitLen int) ([]NodeFetch, error) { - glog.V(vLevel).Infof("pathFromNodeToRootAtSnapshot(%d, %d, %d, %d, %d)", node, level, snapshot, treeSize, maxBitLen) +func pathFromNodeToRootAtSnapshot(index int64, level int, snapshot, treeSize int64, maxBitLen int) ([]NodeFetch, error) { + glog.V(vLevel).Infof("pathFromNodeToRootAtSnapshot(%d, %d, %d, %d, %d)", index, level, snapshot, treeSize, maxBitLen) proof := make([]NodeFetch, 0, bitLen(snapshot)+1) if snapshot == 0 { @@ -147,7 +148,7 @@ func pathFromNodeToRootAtSnapshot(node int64, level int, snapshot, treeSize int6 // Move up, recording the sibling of the current node at each level. 
for lastNode != 0 { - sibling := node ^ 1 + sibling := index ^ 1 if sibling < lastNode { // The sibling is not the last node of the level in the snapshot tree glog.V(vvLevel).Infof("Not last: S:%d L:%d", sibling, level) @@ -166,7 +167,7 @@ func pathFromNodeToRootAtSnapshot(node int64, level int, snapshot, treeSize int6 // No recomputation required as we're using the tree in its current state // Account for non existent nodes - these can only be the rightmost node at an // intermediate (non leaf) level in the tree so will always be a right sibling. - n, err := siblingIDSkipLevels(snapshot, lastNode, level, node, maxBitLen) + n, err := siblingIDSkipLevels(snapshot, lastNode, level, index, maxBitLen) if err != nil { return nil, err } @@ -191,7 +192,7 @@ func pathFromNodeToRootAtSnapshot(node int64, level int, snapshot, treeSize int6 } // Sibling > lastNode so does not exist, move up - node >>= 1 + index >>= 1 lastNode >>= 1 level++ } @@ -435,7 +436,7 @@ func checkRecomputation(fetches []NodeFetch) error { // siblingIDSkipLevels creates a new NodeID for the supplied node, accounting for levels skipped // in storage. Note that it returns an ID for the node sibling so care should be taken to pass the // correct value for the node parameter. 
-func siblingIDSkipLevels(snapshot, lastNode int64, level int, node int64, maxBitLen int) (storage.NodeID, error) { - l, sibling := skipMissingLevels(snapshot, lastNode, level, node) - return storage.NewNodeIDForTreeCoords(int64(l), sibling, maxBitLen) +func siblingIDSkipLevels(snapshot, lastNode int64, level int, index int64, maxBitLen int) (node.NodeID, error) { + l, sibling := skipMissingLevels(snapshot, lastNode, level, index) + return node.NewNodeIDForTreeCoords(int64(l), sibling, maxBitLen) } diff --git a/merkle/sparse_merkle_tree.go b/merkle/sparse_merkle_tree.go index a38903b04d..613d1f5876 100644 --- a/merkle/sparse_merkle_tree.go +++ b/merkle/sparse_merkle_tree.go @@ -24,6 +24,7 @@ import ( "github.com/golang/glog" "github.com/google/trillian/merkle/hashers" + "github.com/google/trillian/node" "github.com/google/trillian/storage" ) @@ -56,6 +57,7 @@ type indexAndHash struct { // rootHashOrError represents a (sub-)tree root hash, or an error which // prevented the calculation from completing. +// TODO(gdbelvin): represent an empty subtree with a nil hash? type rootHashOrError struct { hash []byte err error @@ -109,7 +111,7 @@ type subtreeWriter struct { tx storage.TreeTX treeRevision int64 - treeHasher hashers.MapHasher + hasher hashers.MapHasher getSubtree getSubtreeFunc } @@ -151,30 +153,31 @@ func (s *subtreeWriter) getOrCreateChildSubtree(ctx context.Context, childPrefix return subtree, nil } -// SetLeaf sets a single leaf hash for incorporation into the sparse Merkle -// tree. +// SetLeaf sets a single leaf hash for incorporation into the sparse Merkle tree. +// index is the full path of the leaf, starting from the root (not the subtree's root). 
func (s *subtreeWriter) SetLeaf(ctx context.Context, index []byte, hash []byte) error { - indexLen := len(index) * 8 + depth := len(index) * 8 + absSubtreeDepth := len(s.prefix)*8 + s.subtreeDepth switch { - case indexLen < s.subtreeDepth: - return fmt.Errorf("index length %d is < our depth %d", indexLen, s.subtreeDepth) + case depth < absSubtreeDepth: + return fmt.Errorf("depth: %d, want >= %d", depth, absSubtreeDepth) - case indexLen > s.subtreeDepth: - childPrefix := index[:s.subtreeDepth/8] + case depth > absSubtreeDepth: + childPrefix := index[:absSubtreeDepth/8] subtree, err := s.getOrCreateChildSubtree(ctx, childPrefix) if err != nil { return err } - return subtree.SetLeaf(ctx, index[s.subtreeDepth/8:], hash) + return subtree.SetLeaf(ctx, index, hash) - case indexLen == s.subtreeDepth: - s.leafQueue <- func() (*indexAndHash, error) { return &indexAndHash{index: index, hash: hash}, nil } + default: // depth == absSubtreeDepth: + s.leafQueue <- func() (*indexAndHash, error) { + return &indexAndHash{index: index, hash: hash}, nil + } return nil } - - return fmt.Errorf("internal logic error in SetLeaf. index length: %d, subtreeDepth: %d", indexLen, s.subtreeDepth) } // CalculateRoot initiates the process of calculating the subtree root. 
@@ -209,49 +212,51 @@ func (s *subtreeWriter) buildSubtree(ctx context.Context) { s.root <- rootHashOrError{hash: nil, err: err} return } - leaves = append(leaves, HStar2LeafHash{Index: new(big.Int).SetBytes(ih.index), LeafHash: ih.hash}) + nodeID := node.NewNodeIDFromPrefixSuffix(ih.index, node.Suffix{}, s.hasher.BitLen()) + + leaves = append(leaves, HStar2LeafHash{ + Index: nodeID.BigInt(), + LeafHash: ih.hash, + }) nodesToStore = append(nodesToStore, storage.Node{ - NodeID: storage.NewNodeIDFromHash(bytes.Join([][]byte{s.prefix, ih.index}, []byte{})), + NodeID: nodeID, Hash: ih.hash, NodeRevision: s.treeRevision, }) - } // calculate new root, and intermediate nodes: - hs2 := NewHStar2(s.treeID, s.treeHasher) - treeDepthOffset := (s.treeHasher.Size()-len(s.prefix))*8 - s.subtreeDepth - totalDepth := len(s.prefix)*8 + s.subtreeDepth - root, err := hs2.HStar2Nodes(s.subtreeDepth, treeDepthOffset, leaves, - func(height int, index *big.Int) ([]byte, error) { - nodeID := storage.NewNodeIDFromRelativeBigInt(s.prefix, s.subtreeDepth, height, index, totalDepth) + hs2 := NewHStar2(s.treeID, s.hasher) + root, err := hs2.HStar2Nodes(s.prefix, s.subtreeDepth, leaves, + func(depth int, index *big.Int) ([]byte, error) { + nodeID := storage.NewNodeIDFromBigInt(depth, index, s.hasher.BitLen()) glog.V(4).Infof("buildSubtree.get(%x, %d) nid: %x, %v", - index.Bytes(), height, nodeID.Path, nodeID.PrefixLenBits) - nodes, err := s.tx.GetMerkleNodes(ctx, s.treeRevision, []storage.NodeID{nodeID}) + index.Bytes(), depth, nodeID.Path, nodeID.PrefixLenBits) + nodes, err := s.tx.GetMerkleNodes(ctx, s.treeRevision, []node.NodeID{nodeID}) if err != nil { return nil, err } if len(nodes) == 0 { return nil, nil } - if expected, got := nodeID, nodes[0].NodeID; !expected.Equivalent(got) { - return nil, fmt.Errorf("expected node ID %s from storage, but got %s", expected.String(), got.String()) + if got, want := nodes[0].NodeID, nodeID; !got.Equivalent(want) { + return nil, fmt.Errorf("got node %s 
from storage, want %s", got, want) } - if expected, got := s.treeRevision, nodes[0].NodeRevision; got > expected { - return nil, fmt.Errorf("expected node revision <= %d, but got %d", expected, got) + if got, want := nodes[0].NodeRevision, s.treeRevision; got > want { + return nil, fmt.Errorf("got node revision %d, want <= %d", got, want) } return nodes[0].Hash, nil }, - func(height int, index *big.Int, h []byte) error { + func(depth int, index *big.Int, h []byte) error { // Don't store the root node of the subtree - that's part of the parent // tree. - if height == 0 && len(s.prefix) > 0 { + if depth == len(s.prefix)*8 && len(s.prefix) > 0 { return nil } - nodeID := storage.NewNodeIDFromRelativeBigInt(s.prefix, s.subtreeDepth, height, index, totalDepth) + nodeID := storage.NewNodeIDFromBigInt(depth, index, s.hasher.BitLen()) glog.V(4).Infof("buildSubtree.set(%x, %v) nid: %x, %v : %x", - index.Bytes(), height, nodeID.Path, nodeID.PrefixLenBits, h) + index.Bytes(), depth, nodeID.Path, nodeID.PrefixLenBits, h) nodesToStore = append(nodesToStore, storage.Node{ NodeID: nodeID, @@ -321,7 +326,7 @@ func newLocalSubtreeWriter(ctx context.Context, treeID, rev int64, prefix []byte root: make(chan rootHashOrError, 1), children: make(map[string]Subtree), tx: tx, - treeHasher: h, + hasher: h, getSubtree: func(ctx context.Context, p []byte) (Subtree, error) { myPrefix := bytes.Join([][]byte{prefix, p}, []byte{}) return newLocalSubtreeWriter(ctx, treeID, rev, myPrefix, depths[1:], newTX, h) @@ -354,8 +359,8 @@ func NewSparseMerkleTreeWriter(ctx context.Context, treeID, rev int64, h hashers // RootAtRevision returns the sparse Merkle tree root hash at the specified // revision, or ErrNoSuchRevision if the requested revision doesn't exist. 
func (s SparseMerkleTreeReader) RootAtRevision(ctx context.Context, rev int64) ([]byte, error) { - rootNodeID := storage.NewEmptyNodeID(256) - nodes, err := s.tx.GetMerkleNodes(ctx, rev, []storage.NodeID{rootNodeID}) + rootNodeID := node.NewEmptyNodeID(256) + nodes, err := s.tx.GetMerkleNodes(ctx, rev, []node.NodeID{rootNodeID}) if err != nil { return nil, err } diff --git a/merkle/sparse_merkle_tree_test.go b/merkle/sparse_merkle_tree_test.go index 91f36b44a5..e612c10ce0 100644 --- a/merkle/sparse_merkle_tree_test.go +++ b/merkle/sparse_merkle_tree_test.go @@ -31,6 +31,7 @@ import ( "github.com/golang/glog" "github.com/golang/mock/gomock" "github.com/google/trillian/merkle/maphasher" + "github.com/google/trillian/node" "github.com/google/trillian/storage" "github.com/google/trillian/testonly" ) @@ -43,11 +44,11 @@ var ( // These nodes were generated randomly and reviewed to ensure node IDs do not collide with // those fetched during the test. var inclusionProofIncorrectTestNodes = []storage.Node{ - {NodeID: storage.NodeID{Path: []uint8{0x2c, 0x8b, 0xcf, 0xe1, 0xc5, 0x71, 0xf4, 0x2d, 0xc2, 0xe9, 0x22, 0x7d, 0x91, 0xd5, 0x93, 0x70, 0x8f, 0x8c, 0x40, 0xca, 0xf, 0xd3, 0xd8, 0x4b, 0x43, 0x6a, 0x3, 0x2f, 0xf1, 0x4, 0x7, 0x9b}, PrefixLenBits: 174}, Hash: []uint8{0x4, 0x7b, 0xe5, 0xab, 0x12, 0x2d, 0x44, 0x98, 0xd8, 0xcc, 0xc7, 0x27, 0x4d, 0xc5, 0xda, 0x59, 0x38, 0xf5, 0x4d, 0x9c, 0x98, 0x33, 0x2a, 0x95, 0xb1, 0x20, 0xe2, 0x8c, 0x7, 0x5f, 0xb5, 0x9a}, NodeRevision: 34}, - {NodeID: storage.NodeID{Path: []uint8{0x7c, 0xf5, 0x65, 0xc6, 0xd5, 0xbe, 0x2d, 0x39, 0xff, 0xf4, 0x58, 0xc2, 0x9f, 0x4f, 0x9, 0x3c, 0x54, 0x62, 0xf5, 0x35, 0x19, 0x87, 0x56, 0xb5, 0x4c, 0x6c, 0x11, 0xf3, 0xd7, 0x2, 0xc, 0x80}, PrefixLenBits: 234}, Hash: []uint8{0xbc, 0x33, 0xbe, 0x74, 0x79, 0x43, 0x59, 0x83, 0x5d, 0x93, 0x87, 0x13, 0x22, 0x98, 0xa0, 0x69, 0xed, 0xa5, 0xca, 0xfb, 0x7c, 0x16, 0x91, 0x51, 0xa2, 0xb, 0x9f, 0x17, 0xe4, 0x3f, 0xe3, 0x3}, NodeRevision: 34}, - {NodeID: storage.NodeID{Path: 
[]uint8{0x5f, 0xc6, 0x73, 0x1c, 0x5d, 0x57, 0x23, 0xdc, 0x6a, 0xd, 0x38, 0xcb, 0x41, 0x25, 0x97, 0x2, 0x63, 0x8d, 0xa, 0x2d, 0xbe, 0x8e, 0x88, 0xff, 0x9e, 0x54, 0x5b, 0xb4, 0x5d, 0x4e, 0x6e, 0x5b}, PrefixLenBits: 223}, Hash: []uint8{0xb6, 0xd4, 0xbd, 0x76, 0x5e, 0x9b, 0x80, 0x2f, 0x71, 0x32, 0x5e, 0xf8, 0x41, 0xea, 0x47, 0xc7, 0x4, 0x7d, 0xd, 0x64, 0xa8, 0xf6, 0x22, 0xe4, 0xb4, 0xe1, 0xef, 0x2f, 0x67, 0xf8, 0x8b, 0xaa}, NodeRevision: 34}, - {NodeID: storage.NodeID{Path: []uint8{0x30, 0xe, 0x65, 0x75, 0x4d, 0xd9, 0x7a, 0x1, 0xc5, 0x2b, 0x2a, 0x6f, 0x4b, 0x59, 0x5d, 0xa8, 0xeb, 0x65, 0x25, 0x3a, 0xc5, 0xf7, 0xd2, 0x4b, 0xcc, 0x54, 0xbf, 0xe8, 0x6e, 0xe8, 0x96, 0xb7}, PrefixLenBits: 156}, Hash: []uint8{0x74, 0x93, 0x28, 0x98, 0xbc, 0xd0, 0xfd, 0x28, 0xa9, 0x39, 0xb5, 0xb5, 0xe9, 0xcc, 0x17, 0xe0, 0xe2, 0xd, 0x16, 0x14, 0xfd, 0xb1, 0x67, 0x19, 0x31, 0x3, 0x73, 0x35, 0xb4, 0x1d, 0x6d, 0x1d}, NodeRevision: 34}, - {NodeID: storage.NodeID{Path: []uint8{0x8e, 0x3b, 0x81, 0xe4, 0x2f, 0xe6, 0xd6, 0x52, 0x9b, 0xbd, 0x36, 0xc5, 0x3, 0x52, 0xe9, 0x60, 0xbb, 0xcb, 0xc9, 0xbd, 0x57, 0x96, 0xaf, 0x18, 0xd4, 0x94, 0xdd, 0x8, 0xa2, 0x43, 0x1e, 0x10}, PrefixLenBits: 157}, Hash: []uint8{0xe0, 0xb6, 0xea, 0x8a, 0xf1, 0x57, 0x1e, 0x5c, 0xbe, 0xbe, 0xd9, 0x5b, 0x29, 0x5f, 0x3, 0x7c, 0x32, 0x33, 0x77, 0xf7, 0x1c, 0x9e, 0x19, 0x4d, 0xc6, 0xdb, 0x5, 0xf7, 0x3e, 0x6c, 0xcb, 0x85}, NodeRevision: 34}, + {NodeID: node.NodeID{Path: []uint8{0x2c, 0x8b, 0xcf, 0xe1, 0xc5, 0x71, 0xf4, 0x2d, 0xc2, 0xe9, 0x22, 0x7d, 0x91, 0xd5, 0x93, 0x70, 0x8f, 0x8c, 0x40, 0xca, 0xf, 0xd3, 0xd8, 0x4b, 0x43, 0x6a, 0x3, 0x2f, 0xf1, 0x4, 0x7, 0x9b}, PrefixLenBits: 174}, Hash: []uint8{0x4, 0x7b, 0xe5, 0xab, 0x12, 0x2d, 0x44, 0x98, 0xd8, 0xcc, 0xc7, 0x27, 0x4d, 0xc5, 0xda, 0x59, 0x38, 0xf5, 0x4d, 0x9c, 0x98, 0x33, 0x2a, 0x95, 0xb1, 0x20, 0xe2, 0x8c, 0x7, 0x5f, 0xb5, 0x9a}, NodeRevision: 34}, + {NodeID: node.NodeID{Path: []uint8{0x7c, 0xf5, 0x65, 0xc6, 0xd5, 0xbe, 0x2d, 0x39, 0xff, 0xf4, 0x58, 0xc2, 0x9f, 0x4f, 0x9, 0x3c, 
0x54, 0x62, 0xf5, 0x35, 0x19, 0x87, 0x56, 0xb5, 0x4c, 0x6c, 0x11, 0xf3, 0xd7, 0x2, 0xc, 0x80}, PrefixLenBits: 234}, Hash: []uint8{0xbc, 0x33, 0xbe, 0x74, 0x79, 0x43, 0x59, 0x83, 0x5d, 0x93, 0x87, 0x13, 0x22, 0x98, 0xa0, 0x69, 0xed, 0xa5, 0xca, 0xfb, 0x7c, 0x16, 0x91, 0x51, 0xa2, 0xb, 0x9f, 0x17, 0xe4, 0x3f, 0xe3, 0x3}, NodeRevision: 34}, + {NodeID: node.NodeID{Path: []uint8{0x5f, 0xc6, 0x73, 0x1c, 0x5d, 0x57, 0x23, 0xdc, 0x6a, 0xd, 0x38, 0xcb, 0x41, 0x25, 0x97, 0x2, 0x63, 0x8d, 0xa, 0x2d, 0xbe, 0x8e, 0x88, 0xff, 0x9e, 0x54, 0x5b, 0xb4, 0x5d, 0x4e, 0x6e, 0x5b}, PrefixLenBits: 223}, Hash: []uint8{0xb6, 0xd4, 0xbd, 0x76, 0x5e, 0x9b, 0x80, 0x2f, 0x71, 0x32, 0x5e, 0xf8, 0x41, 0xea, 0x47, 0xc7, 0x4, 0x7d, 0xd, 0x64, 0xa8, 0xf6, 0x22, 0xe4, 0xb4, 0xe1, 0xef, 0x2f, 0x67, 0xf8, 0x8b, 0xaa}, NodeRevision: 34}, + {NodeID: node.NodeID{Path: []uint8{0x30, 0xe, 0x65, 0x75, 0x4d, 0xd9, 0x7a, 0x1, 0xc5, 0x2b, 0x2a, 0x6f, 0x4b, 0x59, 0x5d, 0xa8, 0xeb, 0x65, 0x25, 0x3a, 0xc5, 0xf7, 0xd2, 0x4b, 0xcc, 0x54, 0xbf, 0xe8, 0x6e, 0xe8, 0x96, 0xb7}, PrefixLenBits: 156}, Hash: []uint8{0x74, 0x93, 0x28, 0x98, 0xbc, 0xd0, 0xfd, 0x28, 0xa9, 0x39, 0xb5, 0xb5, 0xe9, 0xcc, 0x17, 0xe0, 0xe2, 0xd, 0x16, 0x14, 0xfd, 0xb1, 0x67, 0x19, 0x31, 0x3, 0x73, 0x35, 0xb4, 0x1d, 0x6d, 0x1d}, NodeRevision: 34}, + {NodeID: node.NodeID{Path: []uint8{0x8e, 0x3b, 0x81, 0xe4, 0x2f, 0xe6, 0xd6, 0x52, 0x9b, 0xbd, 0x36, 0xc5, 0x3, 0x52, 0xe9, 0x60, 0xbb, 0xcb, 0xc9, 0xbd, 0x57, 0x96, 0xaf, 0x18, 0xd4, 0x94, 0xdd, 0x8, 0xa2, 0x43, 0x1e, 0x10}, PrefixLenBits: 157}, Hash: []uint8{0xe0, 0xb6, 0xea, 0x8a, 0xf1, 0x57, 0x1e, 0x5c, 0xbe, 0xbe, 0xd9, 0x5b, 0x29, 0x5f, 0x3, 0x7c, 0x32, 0x33, 0x77, 0xf7, 0x1c, 0x9e, 0x19, 0x4d, 0xc6, 0xdb, 0x5, 0xf7, 0x3e, 0x6c, 0xcb, 0x85}, NodeRevision: 34}, } func maybeProfileCPU(t *testing.T) func() { @@ -98,7 +99,7 @@ func getSparseMerkleTreeWriterWithMockTX(ctx context.Context, ctrl *gomock.Contr type rootNodeMatcher struct{} func (r rootNodeMatcher) Matches(x interface{}) bool { - nodes, ok 
:= x.([]storage.NodeID) + nodes, ok := x.([]node.NodeID) if !ok { return false } @@ -121,7 +122,7 @@ func randomBytes(t *testing.T, n int) []byte { func getRandomRootNode(t *testing.T, rev int64) storage.Node { return storage.Node{ - NodeID: storage.NewEmptyNodeID(0), + NodeID: node.NewEmptyNodeID(0), Hash: randomBytes(t, 32), NodeRevision: rev, } @@ -369,8 +370,8 @@ func testSparseTreeCalculatedRootWithWriter(ctx context.Context, t *testing.T, r if err != nil { t.Fatalf("Failed to commit map changes: %v", err) } - if expected, got := vec.expectedRoot, root; !bytes.Equal(expected, got) { - t.Errorf("Expected root:\n%s, but got root:\n%s", base64.StdEncoding.EncodeToString(expected), base64.StdEncoding.EncodeToString(got)) + if got, want := root, vec.expectedRoot; !bytes.Equal(got, want) { + t.Errorf("got root: %x, want %x", got, want) } } @@ -393,11 +394,11 @@ func TestSparseMerkleTreeWriter(t *testing.T) { } type nodeIDFuncMatcher struct { - f func(ids []storage.NodeID) bool + f func(ids []node.NodeID) bool } func (f nodeIDFuncMatcher) Matches(x interface{}) bool { - n, ok := x.([]storage.NodeID) + n, ok := x.([]node.NodeID) if !ok { return false } @@ -419,7 +420,7 @@ func testSparseTreeFetches(ctx context.Context, t *testing.T, vec sparseTestVect reads := make(map[string]string) readMutex := sync.Mutex{} - var leafNodeIDs []storage.NodeID + var leafNodeIDs []node.NodeID { readMutex.Lock() @@ -459,7 +460,7 @@ func testSparseTreeFetches(ctx context.Context, t *testing.T, vec sparseTestVect // Now, set up a mock call for GetMerkleNodes for the nodeIDs in the map // we've just created: - tx.EXPECT().GetMerkleNodes(ctx, int64(rev), nodeIDFuncMatcher{func(ids []storage.NodeID) bool { + tx.EXPECT().GetMerkleNodes(ctx, int64(rev), nodeIDFuncMatcher{func(ids []node.NodeID) bool { if len(ids) == 0 { return false } @@ -476,7 +477,7 @@ func testSparseTreeFetches(ctx context.Context, t *testing.T, vec sparseTestVect // rather than doing that we'll make a note of all the 
unexpected IDs here // instead, and we can then print them out later on. tx.EXPECT().GetMerkleNodes(ctx, int64(rev), gomock.Any()).AnyTimes().Do( - func(rev int64, a []storage.NodeID) { + func(rev int64, a []node.NodeID) { if a == nil { return } diff --git a/node/nodeid.go b/node/nodeid.go new file mode 100644 index 0000000000..38d54870bb --- /dev/null +++ b/node/nodeid.go @@ -0,0 +1,311 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package node + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + + "github.com/golang/glog" +) + +// NodeID uniquely identifies a Node within a versioned MerkleTree. +type NodeID struct { + // path is effectively a BigEndian bit set, with path[0] being the MSB + // (identifying the root child), and successive bits identifying the lower + // level children down to the leaf. + Path []byte + // PrefixLenBits is the number of MSB in Path which are considered part of + // this NodeID. + // + // e.g. if Path contains two bytes, and PrefixLenBits is 9, then the 8 bits + // in Path[0] are included, along with the lowest bit of Path[1] + PrefixLenBits int +} + +// PathLenBits returns 8 * len(path). +func (n NodeID) PathLenBits() int { + return len(n.Path) * 8 +} + +// bytesForBits returns the number of bytes required to store numBits bits. 
+func bytesForBits(numBits int) int { + return (numBits + 7) >> 3 +} + +// NewNodeIDFromHash creates a new NodeID for the given Hash. +func NewNodeIDFromHash(h []byte) NodeID { + return NodeID{ + Path: h, + PrefixLenBits: len(h) * 8, + } +} + +// NewEmptyNodeID creates a new zero-length NodeID with sufficient underlying +// capacity to store a maximum of maxLenBits. +func NewEmptyNodeID(maxLenBits int) NodeID { + if got, want := maxLenBits%8, 0; got != want { + panic(fmt.Sprintf("storeage: NewEmptyNodeID() maxLenBits mod 8: %v, want %v", got, want)) + } + return NodeID{ + Path: make([]byte, maxLenBits/8), + PrefixLenBits: 0, + } +} + +// NewNodeIDFromPrefix returns a nodeID for a particular node within a subtree. +// Prefix is the prefix of the subtree. +// depth is the depth of index from the root of the subtree. +// index is the horizontal location of the subtree leaf. +// subDepth is the total number of levels in the subtree. +// totalDepth is the number of levels in the whole tree. +func NewNodeIDFromPrefix(prefix []byte, depth int, index int64, subDepth, totalDepth int) NodeID { + if got, want := totalDepth%8, 0; got != want || got < want { + panic(fmt.Sprintf("storage NewNodeFromPrefix(): totalDepth mod 8: %v, want %v", got, want)) + } + if got, want := subDepth%8, 0; got != want || got < want { + panic(fmt.Sprintf("storage NewNodeFromPrefix(): subDepth mod 8: %v, want %v", got, want)) + } + if got, want := depth, 0; got < want { + panic(fmt.Sprintf("storage NewNodeFromPrefix(): depth: %v, want >= %v", got, want)) + } + + // Put prefix in the MSB bits of path. + path := make([]byte, totalDepth/8) + copy(path, prefix) + + // Convert index into absolute coordinates for subtree. + height := subDepth - depth + subIndex := index << uint(height) // index is the horizontal index at the given height. + + // Copy subDepth/8 bytes of subIndex into path. 
+ subPath := new(bytes.Buffer) + binary.Write(subPath, binary.BigEndian, uint64(subIndex)) + unusedHighBytes := 64/8 - subDepth/8 + copy(path[len(prefix):], subPath.Bytes()[unusedHighBytes:]) + + return NodeID{ + Path: path, + PrefixLenBits: len(prefix)*8 + depth, + } +} + +// NewNodeIDFromBigInt returns a NodeID of a big.Int with no prefix. +// index contains the path's least significant bits. +// depth indicates the number of bits from the most significant bit to treat as part of the path. +func NewNodeIDFromBigInt(depth int, index *big.Int, totalDepth int) NodeID { + if got, want := totalDepth%8, 0; got != want || got < want { + panic(fmt.Sprintf("storage NewNodeFromBitInt(): totalDepth mod 8: %v, want %v", got, want)) + } + + // Put index in the LSB bits of path. + path := make([]byte, totalDepth/8) + unusedHighBytes := len(path) - len(index.Bytes()) + copy(path[unusedHighBytes:], index.Bytes()) + + // TODO(gdbelvin): consider masking off insignificant bits past depth. + glog.V(5).Infof("NewNodeIDFromBigInt(%v, %x, %v): %v, %x", + depth, index.Bytes(), totalDepth, depth, path) + + return NodeID{ + Path: path, + PrefixLenBits: depth, + } +} + +// BigInt returns the big.Int for this node. +func (n NodeID) BigInt() *big.Int { + return new(big.Int).SetBytes(n.Path) +} + +// NewNodeIDWithPrefix creates a new NodeID of nodeIDLen bits with the prefixLen MSBs set to prefix. +// NewNodeIDWithPrefix places the lower prefixLenBits of prefix in the most significant bits of path. 
+// Path will have enough bytes to hold maxLenBits +// +func NewNodeIDWithPrefix(prefix uint64, prefixLenBits, nodeIDLenBits, maxLenBits int) NodeID { + if got, want := nodeIDLenBits%8, 0; got != want { + panic(fmt.Sprintf("nodeIDLenBits mod 8: %v, want %v", got, want)) + } + maxLenBytes := bytesForBits(maxLenBits) + p := NodeID{ + Path: make([]byte, maxLenBytes), + PrefixLenBits: nodeIDLenBits, + } + + bit := maxLenBits - prefixLenBits + for i := 0; i < prefixLenBits; i++ { + if prefix&1 != 0 { + p.SetBit(bit, 1) + } + bit++ + prefix >>= 1 + } + return p +} + +func bitLen(x int64) int { + r := 0 + for x > 0 { + r++ + x >>= 1 + } + return r +} + +// NewNodeIDForTreeCoords creates a new NodeID for a Tree node with a specified depth and +// index. +// This method is used exclusively by the Log, and, since the Log model grows upwards from the +// leaves, we modify the provided coords accordingly. +// +// depth is the Merkle tree level: 0 = leaves, and increases upwards towards the root. +// +// index is the horizontal index into the tree at level depth, so the returned +// NodeID will be zero padded on the right by depth places. +func NewNodeIDForTreeCoords(depth int64, index int64, maxPathBits int) (NodeID, error) { + bl := bitLen(index) + if index < 0 || depth < 0 || + bl > int(maxPathBits-int(depth)) || + maxPathBits%8 != 0 { + return NodeID{}, fmt.Errorf("depth/index combination out of range: depth=%d index=%d maxPathBits=%v", depth, index, maxPathBits) + } + // This node is effectively a prefix of the subtree underneath (for non-leaf + // depths), so we shift the index accordingly. 
+ uidx := uint64(index) << uint(depth) + r := NewEmptyNodeID(maxPathBits) + for i := len(r.Path) - 1; uidx > 0 && i >= 0; i-- { + r.Path[i] = byte(uidx & 0xff) + uidx >>= 8 + } + // In the storage model nodes closer to the leaves have longer nodeIDs, so + // we "reverse" depth here: + r.PrefixLenBits = int(maxPathBits - int(depth)) + return r, nil +} + +// SetBit sets the ith bit to true if b is non-zero, and false otherwise. +func (n *NodeID) SetBit(i int, b uint) { + // TODO(al): investigate whether having lookup tables for these might be + // faster. + bIndex := (n.PathLenBits() - i - 1) / 8 + if b == 0 { + n.Path[bIndex] &= ^(1 << uint(i%8)) + } else { + n.Path[bIndex] |= (1 << uint(i%8)) + } +} + +// Bit returns 1 if the ith bit of Path is set, and 0 otherwise. +func (n *NodeID) Bit(i int) uint { + if got, want := i, n.PathLenBits()-1; got > want { + panic(fmt.Sprintf("storage: Bit(%v) > (PathLenBits() -1): %v", got, want)) + } + bIndex := (n.PathLenBits() - i - 1) / 8 + return uint((n.Path[bIndex] >> uint(i%8)) & 0x01) +} + +// String returns a string representation of the binary value of the NodeID. +// The left-most bit is the MSB (i.e. nearer the root of the tree). +func (n *NodeID) String() string { + var r bytes.Buffer + limit := n.PathLenBits() - n.PrefixLenBits + for i := n.PathLenBits() - 1; i >= limit; i-- { + r.WriteRune(rune('0' + n.Bit(i))) + } + return r.String() +} + +// CoordString returns a string representation assuming that the NodeID represents a +// tree coordinate. Using this on a NodeID for a sparse Merkle tree will give incorrect +// results. Intended for debugging purposes, the format could change. +func (n *NodeID) CoordString() string { + d := uint64(n.PathLenBits() - n.PrefixLenBits) + i := uint64(0) + for _, p := range n.Path { + i = (i << uint64(8)) + uint64(p) + } + + return fmt.Sprintf("[d:%d, i:%d]", d, i>>d) + +// Siblings returns the siblings of the given node. 
+func (n *NodeID) Siblings() []NodeID { + l := n.PrefixLenBits + r := make([]NodeID, l) + // Index of the bit to twiddle: + bi := n.PathLenBits() - n.PrefixLenBits + for i := range r { + r[i].PrefixLenBits = l - i + r[i].Path = make([]byte, len(n.Path)) + copy(r[i].Path, n.Path) + r[i].SetBit(bi, n.Bit(bi)^1) + for j := bi - 1; j >= 0; j-- { + r[i].SetBit(j, 0) + } + bi++ + } + if glog.V(5) { + glog.Infof("[%d, %x].Siblings():", n.PrefixLenBits, n.Path) + for _, s := range r { + glog.Infof(" %x", s.Path) + } + } + return r +} + +// NewNodeIDFromPrefixSuffix undoes Split() and returns the NodeID. +func NewNodeIDFromPrefixSuffix(prefix []byte, suffix Suffix, maxPathBits int) NodeID { + path := make([]byte, maxPathBits/8) + copy(path, prefix) + copy(path[len(prefix):], suffix.Path) + + return NodeID{ + Path: path, + PrefixLenBits: len(prefix)*8 + int(suffix.Bits), + } +} + +// Split splits a NodeID into a prefix and a suffix at prefixSplit +func (n *NodeID) Split(prefixBytes, suffixBits int) ([]byte, Suffix) { + if n.PrefixLenBits == 0 { + return []byte{}, Suffix{Bits: 0, Path: []byte{0}} + } + a := make([]byte, len(n.Path)) + copy(a, n.Path) + + bits := n.PrefixLenBits - prefixBytes*8 + if bits > suffixBits { + panic(fmt.Sprintf("storage Split: %x(n.PrefixLenBits: %v - prefixBytes: %v *8) > %v", n.Path, n.PrefixLenBits, prefixBytes, suffixBits)) + } + if bits == 0 { + panic(fmt.Sprintf("storage Split: %x(n.PrefixLenBits: %v - prefixBytes: %v *8) == 0", n.Path, n.PrefixLenBits, prefixBytes)) + } + suffixBytes := bytesForBits(bits) + sfx := Suffix{ + Bits: byte(bits), + Path: a[prefixBytes : prefixBytes+suffixBytes], + } + maskIndex := (bits - 1) / 8 + maskLowBits := (sfx.Bits-1)%8 + 1 + sfx.Path[maskIndex] &= ((0x01 << maskLowBits) - 1) << uint(8-maskLowBits) + + return a[:prefixBytes], sfx +} + +// Equivalent return true iff the other represents the same path prefix as this NodeID. 
+func (n *NodeID) Equivalent(other NodeID) bool { + return n.String() == other.String() +} diff --git a/storage/types_test.go b/node/nodeid_test.go similarity index 80% rename from storage/types_test.go rename to node/nodeid_test.go index df904d8c87..077a5c9176 100644 --- a/storage/types_test.go +++ b/node/nodeid_test.go @@ -59,27 +59,28 @@ func TestSplit(t *testing.T) { outPrefix []byte outSuffixBits int outSuffix []byte + unusedBytes int }{ - {h2b("1234567f"), 32, 3, 8, h2b("123456"), 8, h2b("7f")}, - {h2b("123456ff"), 29, 3, 8, h2b("123456"), 5, h2b("f8")}, - {h2b("123456ff"), 25, 3, 8, h2b("123456"), 1, h2b("80")}, - {h2b("12345678"), 16, 1, 8, h2b("12"), 8, h2b("34")}, - {h2b("12345678"), 9, 1, 8, h2b("12"), 1, h2b("00")}, - {h2b("12345678"), 8, 0, 8, h2b(""), 8, h2b("12")}, - {h2b("12345678"), 7, 0, 8, h2b(""), 7, h2b("12")}, - {h2b("12345678"), 0, 0, 8, h2b(""), 0, h2b("00")}, - {h2b("70"), 2, 0, 8, h2b(""), 2, h2b("40")}, - {h2b("70"), 3, 0, 8, h2b(""), 3, h2b("60")}, - {h2b("70"), 4, 0, 8, h2b(""), 4, h2b("70")}, - {h2b("70"), 5, 0, 8, h2b(""), 5, h2b("70")}, - {h2b("0003"), 16, 1, 8, h2b("00"), 8, h2b("03")}, - {h2b("0003"), 15, 1, 8, h2b("00"), 7, h2b("02")}, - {h2b("0001000000000000"), 16, 1, 8, h2b("00"), 8, h2b("01")}, - {h2b("0100000000000000"), 8, 0, 8, h2b(""), 8, h2b("01")}, + {h2b("1234567f"), 32, 3, 8, h2b("123456"), 8, h2b("7f"), 0}, + {h2b("123456ff"), 29, 3, 8, h2b("123456"), 5, h2b("f8"), 0}, + {h2b("123456ff"), 25, 3, 8, h2b("123456"), 1, h2b("80"), 0}, + {h2b("12345678"), 16, 1, 8, h2b("12"), 8, h2b("34"), 2}, + {h2b("12345678"), 9, 1, 8, h2b("12"), 1, h2b("00"), 2}, + {h2b("12345678"), 8, 0, 8, h2b(""), 8, h2b("12"), 3}, + {h2b("12345678"), 7, 0, 8, h2b(""), 7, h2b("12"), 3}, + {h2b("12345678"), 0, 0, 8, h2b(""), 0, h2b("00"), 3}, + {h2b("70"), 2, 0, 8, h2b(""), 2, h2b("40"), 0}, + {h2b("70"), 3, 0, 8, h2b(""), 3, h2b("60"), 0}, + {h2b("70"), 4, 0, 8, h2b(""), 4, h2b("70"), 0}, + {h2b("70"), 5, 0, 8, h2b(""), 5, h2b("70"), 0}, + 
{h2b("0003"), 16, 1, 8, h2b("00"), 8, h2b("03"), 0}, + {h2b("0003"), 15, 1, 8, h2b("00"), 7, h2b("02"), 0}, + {h2b("0001000000000000"), 16, 1, 8, h2b("00"), 8, h2b("01"), 6}, + {h2b("0100000000000000"), 8, 0, 8, h2b(""), 8, h2b("01"), 7}, // Map subtree scenarios - {h2b("0100000000000000"), 16, 0, 16, h2b(""), 16, h2b("0100")}, - {h2b("0100000000000000"), 32, 0, 32, h2b(""), 32, h2b("01000000")}, - {h2b("0000000000000000000000000000000000000000000000000000000000000001"), 256, 10, 176, h2b("00000000000000000000"), 176, h2b("00000000000000000000000000000000000000000001")}, + {h2b("0100000000000000"), 16, 0, 16, h2b(""), 16, h2b("0100"), 6}, + {h2b("0100000000000000"), 32, 0, 32, h2b(""), 32, h2b("01000000"), 4}, + {h2b("0000000000000000000000000000000000000000000000000000000000000001"), 256, 10, 176, h2b("00000000000000000000"), 176, h2b("00000000000000000000000000000000000000000001"), 0}, } { n := NewNodeIDFromHash(tc.inPath) n.PrefixLenBits = tc.inPathLenBits @@ -98,39 +99,19 @@ func TestSplit(t *testing.T) { if got, want := s.Path, tc.outSuffix; !bytes.Equal(got, want) { t.Errorf("%d, %x.Split(%v, %v).Path: %x, want %x", tc.inPathLenBits, tc.inPath, tc.splitBytes, tc.suffixBits, got, want) + continue } - } -} -func TestNewNodeIDFromRelativeBigInt(t *testing.T) { - for _, tc := range []struct { - prefix []byte - depth int - index int64 - subDepth int - totalDepth int - wantPath []byte - wantDepth int - }{ - {prefix: h2b(""), depth: 8, index: 0, subDepth: 8, totalDepth: 64, wantPath: h2b("0000000000000000"), wantDepth: 8}, - {prefix: h2b(""), depth: 8, index: 1, subDepth: 8, totalDepth: 64, wantPath: h2b("0100000000000000"), wantDepth: 8}, - {prefix: h2b("00"), depth: 7, index: 1, subDepth: 8, totalDepth: 64, wantPath: h2b("0001000000000000"), wantDepth: 15}, - {prefix: h2b("00"), depth: 8, index: 1, subDepth: 8, totalDepth: 64, wantPath: h2b("0001000000000000"), wantDepth: 16}, - {prefix: h2b("00"), depth: 16, index: 257, subDepth: 16, totalDepth: 64, wantPath: 
h2b("0001010000000000"), wantDepth: 24}, - {prefix: h2b("12345678"), depth: 8, index: 1, subDepth: 8, totalDepth: 64, wantPath: h2b("1234567801000000"), wantDepth: 40}, - - {prefix: h2b("00"), subDepth: 248, depth: 247, index: 1, totalDepth: 256, wantPath: h2b("0000000000000000000000000000000000000000000000000000000000000001"), wantDepth: 255}, - {prefix: h2b("00000000000000000000"), subDepth: 176, depth: 176, index: 1, totalDepth: 256, wantPath: h2b("0000000000000000000000000000000000000000000000000000000000000001"), wantDepth: 256}, - } { - i := big.NewInt(tc.index) - n := NewNodeIDFromRelativeBigInt(tc.prefix, tc.subDepth, tc.depth, i, tc.totalDepth) - if got, want := n.Path, tc.wantPath; !bytes.Equal(got, want) { - t.Errorf("NewNodeIDFromRelativeBigInt(%x, %v, %v, %v, %v).Path: %x, want %x", - tc.prefix, tc.depth, tc.index, tc.subDepth, tc.totalDepth, got, want) + newNode := NewNodeIDFromPrefixSuffix(p, s, len(tc.inPath)*8) + want := []byte{} + want = append(want, tc.outPrefix...) + want = append(want, tc.outSuffix...) + want = append(want, make([]byte, tc.unusedBytes)...) + if got, want := newNode.Path, want; !bytes.Equal(got, want) { + t.Errorf("NewNodeIDFromPrefix(%x, %v).Path: %x, want %x", p, s, got, want) } - if got, want := n.PrefixLenBits, tc.wantDepth; got != want { - t.Errorf("NewNodeIDFromRelativeBigInt(%x, %v, %v, %v, %v).Depth: %v, want %v", - tc.prefix, tc.depth, tc.index, tc.subDepth, tc.totalDepth, got, want) + if got, want := newNode.PrefixLenBits, n.PrefixLenBits; got != want { + t.Errorf("NewNodeIDFromPrefix(%x, %v).PrefixLenBits: %x, want %x", p, s, got, want) } } } diff --git a/storage/suffix.go b/node/suffix.go similarity index 82% rename from storage/suffix.go rename to node/suffix.go index 18cd60a1f7..1af6961aa7 100644 --- a/storage/suffix.go +++ b/node/suffix.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package storage +package node import ( "encoding/base64" @@ -37,3 +37,16 @@ func (s Suffix) String() string { r = append(r, s.Path...) return base64.StdEncoding.EncodeToString(r) } + +// ParseSuffix converts a suffix string back into a Suffix. +func ParseSuffix(s string) (Suffix, error) { + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return Suffix{}, err + } + + return Suffix{ + Bits: byte(b[0]), + Path: b[1:], + }, nil +} diff --git a/storage/suffix_test.go b/node/suffix_test.go similarity index 84% rename from storage/suffix_test.go rename to node/suffix_test.go index 5477aa053f..67d88d25b1 100644 --- a/storage/suffix_test.go +++ b/node/suffix_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package storage +package node import ( "bytes" @@ -26,9 +26,36 @@ const ( logStrataDepth = 8 maxLogDepth = 64 // TODO(gdbelvin): remove these constants in favor of the real ones in - // storage/cache when merkle no longer depends on storage.NodeID + // storage/cache when merkle no longer depends on node.NodeID ) +func TestParseSuffix(t *testing.T) { + for _, tc := range []struct { + prefix []byte + leafIndex int64 + want []byte + }{ + {h2b(""), 1, h2b("0801")}, + {h2b("00"), 1, h2b("0801")}, + } { + nodeID := NewNodeIDFromPrefix(tc.prefix, logStrataDepth, tc.leafIndex, logStrataDepth, maxLogDepth) + _, sfx := nodeID.Split(len(tc.prefix), logStrataDepth) + sfxKey := sfx.String() + + sfxP, err := ParseSuffix(sfxKey) + if err != nil { + t.Errorf("ParseSuffix(%s): %v", sfxKey, err) + continue + } + if got, want := sfx.Bits, sfxP.Bits; got != want { + t.Errorf("ParseSuffix(%s).Bits: %v, want %v", sfxKey, got, want) + } + if got, want := sfx.Path, sfxP.Path; !bytes.Equal(got, want) { + t.Errorf("ParseSuffix(%s).Bits: %x, want %x", sfxKey, got, want) + } + } +} + // TestSuffixKeyEquals ensures that NodeID.Split produces the same output as makeSuffixKey for the Log's use cases. 
func TestSuffixKeyEquals(t *testing.T) { for _, tc := range []struct { diff --git a/quota/mock_quota.go b/quota/mock_quota.go index 9a838e3d86..b195316385 100644 --- a/quota/mock_quota.go +++ b/quota/mock_quota.go @@ -5,8 +5,9 @@ package quota import ( context "context" - gomock "github.com/golang/mock/gomock" reflect "reflect" + + gomock "github.com/golang/mock/gomock" ) // MockManager is a mock of Manager interface diff --git a/scripts/presubmit.sh b/scripts/presubmit.sh index 589d3132b7..c1068c247f 100755 --- a/scripts/presubmit.sh +++ b/scripts/presubmit.sh @@ -93,7 +93,7 @@ main() { go build ${go_dirs} echo 'running go test' - go test -cover -timeout=5m ${goflags} ${go_dirs} + go test -cover -timeout=10m ${goflags} ${go_dirs} fi if [[ "${run_linters}" -eq 1 ]]; then diff --git a/server/log_rpc_server_test.go b/server/log_rpc_server_test.go index c429495a12..e266cad3bf 100644 --- a/server/log_rpc_server_test.go +++ b/server/log_rpc_server_test.go @@ -25,6 +25,7 @@ import ( "github.com/google/trillian" "github.com/google/trillian/extension" "github.com/google/trillian/merkle/rfc6962" + "github.com/google/trillian/node" "github.com/google/trillian/storage" stestonly "github.com/google/trillian/storage/testonly" "github.com/kylelemons/godebug/pretty" @@ -68,12 +69,12 @@ var ( getConsistencyProofRequest25 = trillian.GetConsistencyProofRequest{LogId: logID1, FirstTreeSize: 10, SecondTreeSize: 25} getConsistencyProofRequest7 = trillian.GetConsistencyProofRequest{LogId: logID1, FirstTreeSize: 4, SecondTreeSize: 7} - nodeIdsInclusionSize7Index2 = []storage.NodeID{ + nodeIdsInclusionSize7Index2 = []node.NodeID{ stestonly.MustCreateNodeIDForTreeCoords(0, 3, 64), stestonly.MustCreateNodeIDForTreeCoords(1, 0, 64), stestonly.MustCreateNodeIDForTreeCoords(2, 1, 64)} - nodeIdsConsistencySize4ToSize7 = []storage.NodeID{stestonly.MustCreateNodeIDForTreeCoords(2, 1, 64)} + nodeIdsConsistencySize4ToSize7 = []node.NodeID{stestonly.MustCreateNodeIDForTreeCoords(2, 1, 64)} ) func 
TestGetLeavesByIndexInvalidIndexRejected(t *testing.T) { diff --git a/server/map_rpc_server.go b/server/map_rpc_server.go index 2222d19c5d..3e879df0d1 100644 --- a/server/map_rpc_server.go +++ b/server/map_rpc_server.go @@ -106,7 +106,7 @@ func (t *TrillianMapServer) GetLeaves(ctx context.Context, req *trillian.GetMapL leaf = &trillian.MapLeaf{ Index: index, LeafValue: nil, - LeafHash: hasher.HashLeaf(mapID, index, hasher.BitLen(), nil), + LeafHash: hasher.HashLeaf(mapID, index, 0, nil), } } @@ -167,7 +167,7 @@ func (t *TrillianMapServer) SetLeaves(ctx context.Context, req *trillian.SetMapL "len(%x): %v, want %v", l.Index, got, want) } // TODO(gbelvin) use LeafHash rather than computing here. #423 - l.LeafHash = hasher.HashLeaf(mapID, l.Index, hasher.BitLen(), l.LeafValue) + l.LeafHash = hasher.HashLeaf(mapID, l.Index, 0, l.LeafValue) if err = tx.Set(ctx, l.Index, *l); err != nil { return nil, err diff --git a/server/mock_log_operation.go b/server/mock_log_operation.go index d1da16a4c7..a7caa36967 100644 --- a/server/mock_log_operation.go +++ b/server/mock_log_operation.go @@ -5,8 +5,9 @@ package server import ( context "context" - gomock "github.com/golang/mock/gomock" reflect "reflect" + + gomock "github.com/golang/mock/gomock" ) // MockLogOperation is a mock of LogOperation interface diff --git a/server/proof_fetcher.go b/server/proof_fetcher.go index 847a3f1119..2dc8c46387 100644 --- a/server/proof_fetcher.go +++ b/server/proof_fetcher.go @@ -21,6 +21,7 @@ import ( "github.com/google/trillian" "github.com/google/trillian/merkle" "github.com/google/trillian/merkle/hashers" + "github.com/google/trillian/node" "github.com/google/trillian/storage" ) @@ -101,7 +102,7 @@ func (r *rehasher) rehashedProof(leafIndex int64) (trillian.Proof, error) { // fetchNodes extracts the NodeIDs from a list of NodeFetch structs and passes them // to storage, returning the result after some additional validation checks. 
func fetchNodes(ctx context.Context, tx storage.NodeReader, treeRevision int64, fetches []merkle.NodeFetch) ([]storage.Node, error) { - proofNodeIDs := make([]storage.NodeID, 0, len(fetches)) + proofNodeIDs := make([]node.NodeID, 0, len(fetches)) for _, fetch := range fetches { proofNodeIDs = append(proofNodeIDs, fetch.NodeID) diff --git a/server/sequencer_manager_test.go b/server/sequencer_manager_test.go index 3c194524af..b7b5fb69ff 100644 --- a/server/sequencer_manager_test.go +++ b/server/sequencer_manager_test.go @@ -28,6 +28,7 @@ import ( "github.com/google/trillian/crypto/sigpb" "github.com/google/trillian/extension" "github.com/google/trillian/merkle/rfc6962" + "github.com/google/trillian/node" "github.com/google/trillian/quota" "github.com/google/trillian/storage" stestonly "github.com/google/trillian/storage/testonly" @@ -64,7 +65,7 @@ var testRoot0 = trillian.SignedLogRoot{ SignatureAlgorithm: sigpb.DigitallySigned_ECDSA, }, } -var updatedNodes0 = []storage.Node{{NodeID: storage.NodeID{Path: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, PrefixLenBits: 64}, Hash: testonly.MustDecodeBase64("bjQLnP+zepicpUTmu3gKLHiQHT+zNzh2hRGjBhevoB0="), NodeRevision: 1}} +var updatedNodes0 = []storage.Node{{NodeID: node.NodeID{Path: []uint8{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, PrefixLenBits: 64}, Hash: testonly.MustDecodeBase64("bjQLnP+zepicpUTmu3gKLHiQHT+zNzh2hRGjBhevoB0="), NodeRevision: 1}} var updatedRoot = trillian.SignedLogRoot{ LogId: testLogID1, TimestampNanos: fakeTime.UnixNano(), diff --git a/storage/cache/gen.go b/storage/cache/gen.go index 68d298d4d9..5732844efc 100644 --- a/storage/cache/gen.go +++ b/storage/cache/gen.go @@ -18,12 +18,12 @@ package cache //go:generate mockgen -self_package github.com/google/trillian/storage/cache -package cache -imports github.com/google/trillian/storage/storagepb -destination mock_node_storage.go github.com/google/trillian/storage/cache NodeStorage import ( - "github.com/google/trillian/storage" + 
"github.com/google/trillian/node" "github.com/google/trillian/storage/storagepb" ) // NodeStorage provides an interface for storing and retrieving subtrees. type NodeStorage interface { - GetSubtree(n storage.NodeID) (*storagepb.SubtreeProto, error) + GetSubtree(n node.NodeID) (*storagepb.SubtreeProto, error) SetSubtrees(s []*storagepb.SubtreeProto) error } diff --git a/storage/cache/mock_node_storage.go b/storage/cache/mock_node_storage.go index b8be7cbcf3..f3595fbdcd 100644 --- a/storage/cache/mock_node_storage.go +++ b/storage/cache/mock_node_storage.go @@ -4,10 +4,11 @@ package cache import ( + reflect "reflect" + gomock "github.com/golang/mock/gomock" - storage "github.com/google/trillian/storage" + "github.com/google/trillian/node" storagepb "github.com/google/trillian/storage/storagepb" - reflect "reflect" ) // MockNodeStorage is a mock of NodeStorage interface @@ -34,7 +35,7 @@ func (_m *MockNodeStorage) EXPECT() *MockNodeStorageMockRecorder { } // GetSubtree mocks base method -func (_m *MockNodeStorage) GetSubtree(_param0 storage.NodeID) (*storagepb.SubtreeProto, error) { +func (_m *MockNodeStorage) GetSubtree(_param0 node.NodeID) (*storagepb.SubtreeProto, error) { ret := _m.ctrl.Call(_m, "GetSubtree", _param0) ret0, _ := ret[0].(*storagepb.SubtreeProto) ret1, _ := ret[1].(error) diff --git a/storage/cache/subtree_cache.go b/storage/cache/subtree_cache.go index 784d7fb7de..f49cff0489 100644 --- a/storage/cache/subtree_cache.go +++ b/storage/cache/subtree_cache.go @@ -24,15 +24,16 @@ import ( "github.com/golang/glog" "github.com/google/trillian/merkle" "github.com/google/trillian/merkle/hashers" + "github.com/google/trillian/node" "github.com/google/trillian/storage" "github.com/google/trillian/storage/storagepb" ) // GetSubtreeFunc describes a function which can return a Subtree from storage. 
-type GetSubtreeFunc func(id storage.NodeID) (*storagepb.SubtreeProto, error) +type GetSubtreeFunc func(id node.NodeID) (*storagepb.SubtreeProto, error) // GetSubtreesFunc describes a function which can return a number of Subtrees from storage. -type GetSubtreesFunc func(ids []storage.NodeID) ([]*storagepb.SubtreeProto, error) +type GetSubtreesFunc func(ids []node.NodeID) ([]*storagepb.SubtreeProto, error) // SetSubtreesFunc describes a function which can store a collection of Subtrees into storage. type SetSubtreesFunc func(s []*storagepb.SubtreeProto) error @@ -132,7 +133,7 @@ func (s *SubtreeCache) stratumInfoForPrefixLength(numBits int) stratumInfo { // splitNodeID breaks a NodeID out into its prefix and suffix parts. // unless ID is 0 bits long, Suffix must always contain at least one bit. -func (s *SubtreeCache) splitNodeID(id storage.NodeID) ([]byte, storage.Suffix) { +func (s *SubtreeCache) splitNodeID(id node.NodeID) ([]byte, node.Suffix) { sInfo := s.stratumInfoForPrefixLength(id.PrefixLenBits - 1) return id.Split(sInfo.prefixBytes, sInfo.depth) } @@ -140,12 +141,12 @@ func (s *SubtreeCache) splitNodeID(id storage.NodeID) ([]byte, storage.Suffix) { // preload calculates the set of subtrees required to know the hashes of the // passed in node IDs, uses getSubtrees to retrieve them, and finally populates // the cache structures with the data. 
-func (s *SubtreeCache) preload(ids []storage.NodeID, getSubtrees GetSubtreesFunc) error { +func (s *SubtreeCache) preload(ids []node.NodeID, getSubtrees GetSubtreesFunc) error { s.mutex.Lock() defer s.mutex.Unlock() // Figure out the set of subtrees we need: - want := make(map[string]*storage.NodeID) + want := make(map[string]*node.NodeID) for _, id := range ids { id := id px, _ := s.splitNodeID(id) @@ -163,7 +164,7 @@ func (s *SubtreeCache) preload(ids []storage.NodeID, getSubtrees GetSubtreesFunc return nil } - list := make([]storage.NodeID, 0, len(want)) + list := make([]node.NodeID, 0, len(want)) for _, v := range want { list = append(list, *v) } @@ -197,7 +198,7 @@ func (s *SubtreeCache) preload(ids []storage.NodeID, getSubtrees GetSubtreesFunc // GetNodes returns the requested nodes, calling the getSubtrees function if // they are not already cached. -func (s *SubtreeCache) GetNodes(ids []storage.NodeID, getSubtrees GetSubtreesFunc) ([]storage.Node, error) { +func (s *SubtreeCache) GetNodes(ids []node.NodeID, getSubtrees GetSubtreesFunc) ([]storage.Node, error) { if glog.V(4) { for _, n := range ids { glog.Infof("cache: GetNodes(%x, %d", n.Path, n.PrefixLenBits) @@ -211,11 +212,11 @@ func (s *SubtreeCache) GetNodes(ids []storage.NodeID, getSubtrees GetSubtreesFun for _, id := range ids { h, err := s.GetNodeHash( id, - func(n storage.NodeID) (*storagepb.SubtreeProto, error) { + func(n node.NodeID) (*storagepb.SubtreeProto, error) { // This should never happen - we should've already read all the data we // need above, in Preload() glog.Warningf("Unexpectedly reading from within GetNodeHash(): %s", n.String()) - ret, err := getSubtrees([]storage.NodeID{n}) + ret, err := getSubtrees([]node.NodeID{n}) if err != nil || len(ret) == 0 { return nil, err } @@ -239,14 +240,14 @@ func (s *SubtreeCache) GetNodes(ids []storage.NodeID, getSubtrees GetSubtreesFun } // GetNodeHash returns a single node hash from the cache. 
-func (s *SubtreeCache) GetNodeHash(id storage.NodeID, getSubtree GetSubtreeFunc) ([]byte, error) { +func (s *SubtreeCache) GetNodeHash(id node.NodeID, getSubtree GetSubtreeFunc) ([]byte, error) { s.mutex.RLock() defer s.mutex.RUnlock() return s.getNodeHashUnderLock(id, getSubtree) } // getNodeHashUnderLock must be called with s.mutex locked. -func (s *SubtreeCache) getNodeHashUnderLock(id storage.NodeID, getSubtree GetSubtreeFunc) ([]byte, error) { +func (s *SubtreeCache) getNodeHashUnderLock(id node.NodeID, getSubtree GetSubtreeFunc) ([]byte, error) { px, sx := s.splitNodeID(id) prefixKey := string(px) c := s.subtrees[prefixKey] @@ -304,7 +305,7 @@ func (s *SubtreeCache) getNodeHashUnderLock(id storage.NodeID, getSubtree GetSub } // SetNodeHash sets a node hash in the cache. -func (s *SubtreeCache) SetNodeHash(id storage.NodeID, h []byte, getSubtree GetSubtreeFunc) error { +func (s *SubtreeCache) SetNodeHash(id node.NodeID, h []byte, getSubtree GetSubtreeFunc) error { s.mutex.Lock() defer s.mutex.Unlock() px, sx := s.splitNodeID(id) @@ -343,7 +344,7 @@ func (s *SubtreeCache) SetNodeHash(id storage.NodeID, h []byte, getSubtree GetSu if err != nil { glog.Errorf("base64.DecodeString(%v): %v", sfxKey, err) } - glog.Infof("SetNodeHash(pfx: %s, sfx: %x): %x", prefixKey, b, h) + glog.Infof("SetNodeHash(pfx: %x, sfx: %x): %x", prefixKey, b, h) } return nil } @@ -379,7 +380,7 @@ func (s *SubtreeCache) Flush(setSubtrees SetSubtreesFunc) error { return setSubtrees(treesToWrite) } -func (s *SubtreeCache) newEmptySubtree(id storage.NodeID, px []byte) *storagepb.SubtreeProto { +func (s *SubtreeCache) newEmptySubtree(id node.NodeID, px []byte) *storagepb.SubtreeProto { sInfo := s.stratumInfoForPrefixLength(id.PrefixLenBits) glog.V(1).Infof("Creating new empty subtree for %x, with depth %d", px, sInfo.depth) // storage didn't have one for us, so we'll store an empty proto here @@ -400,33 +401,30 @@ func (s *SubtreeCache) newEmptySubtree(id storage.NodeID, px []byte) *storagepb. 
func PopulateMapSubtreeNodes(treeID int64, hasher hashers.MapHasher) storage.PopulateSubtreeFunc { return func(st *storagepb.SubtreeProto) error { st.InternalNodes = make(map[string][]byte) - rootID := storage.NewNodeIDFromHash(st.Prefix) leaves := make([]merkle.HStar2LeafHash, 0, len(st.Leaves)) for k64, v := range st.Leaves { - k, err := base64.StdEncoding.DecodeString(k64) + sfx, err := node.ParseSuffix(k64) if err != nil { return err } - if k[0]%depthQuantum != 0 { - return fmt.Errorf("unexpected non-leaf suffix found: %x", k) + // TODO(gdbelvin): test against subtree depth. + if sfx.Bits%depthQuantum != 0 { + return fmt.Errorf("unexpected non-leaf suffix found: %x", sfx.Bits) } + leaves = append(leaves, merkle.HStar2LeafHash{ + Index: storage.NewNodeIDFromPrefixSuffix(st.Prefix, sfx, hasher.BitLen()).BigInt(), LeafHash: v, - Index: new(big.Int).SetBytes(k[1:]), }) } hs2 := merkle.NewHStar2(treeID, hasher) - offset := hasher.BitLen() - rootID.PrefixLenBits - int(st.Depth) - root, err := hs2.HStar2Nodes(int(st.Depth), offset, leaves, - func(depth int, index *big.Int) ([]byte, error) { - return nil, nil - }, + root, err := hs2.HStar2Nodes(st.Prefix, int(st.Depth), leaves, nil, func(depth int, index *big.Int, h []byte) error { - if depth == 0 { + if depth == len(st.Prefix)*8 { // no space for the root in the node cache return nil } - nodeID := storage.NewNodeIDFromRelativeBigInt(st.Prefix, int(st.Depth), depth, index, hasher.BitLen()) + nodeID := storage.NewNodeIDFromBigInt(depth, index, hasher.BitLen()) _, sfx := nodeID.Split(len(st.Prefix), int(st.Depth)) sfxKey := sfx.String() if glog.V(4) { diff --git a/storage/cache/subtree_cache_test.go b/storage/cache/subtree_cache_test.go index 0894bb5bc9..3db5dc819d 100644 --- a/storage/cache/subtree_cache_test.go +++ b/storage/cache/subtree_cache_test.go @@ -23,6 +23,7 @@ import ( "github.com/google/trillian/merkle" "github.com/google/trillian/merkle/maphasher" "github.com/google/trillian/merkle/rfc6962" + 
"github.com/google/trillian/node" "github.com/google/trillian/storage" "github.com/google/trillian/storage/storagepb" "github.com/google/trillian/testonly" @@ -119,10 +120,10 @@ func TestCacheGetNodesReadsSubtrees(t *testing.T) { m := NewMockNodeStorage(mockCtrl) c := NewSubtreeCache(defaultLogStrata, PopulateMapSubtreeNodes(treeID, maphasher.Default), PrepareMapSubtreeWrite()) - nodeIDs := []storage.NodeID{ - storage.NewNodeIDFromHash([]byte("1234")), - storage.NewNodeIDFromHash([]byte("4567")), - storage.NewNodeIDFromHash([]byte("89ab")), + nodeIDs := []node.NodeID{ + node.NewNodeIDFromHash([]byte("1234")), + node.NewNodeIDFromHash([]byte("4567")), + node.NewNodeIDFromHash([]byte("89ab")), } // Set up the expected reads: @@ -143,7 +144,7 @@ func TestCacheGetNodesReadsSubtrees(t *testing.T) { nodeIDs, // Glue function to convert a call requesting multiple subtrees into a // sequence of calls to our mock storage: - func(ids []storage.NodeID) ([]*storagepb.SubtreeProto, error) { + func(ids []node.NodeID) ([]*storagepb.SubtreeProto, error) { ret := make([]*storagepb.SubtreeProto, 0) for _, i := range ids { r, err := m.GetSubtree(i) @@ -161,7 +162,7 @@ func TestCacheGetNodesReadsSubtrees(t *testing.T) { } } -func noFetch(storage.NodeID) (*storagepb.SubtreeProto, error) { +func noFetch(node.NodeID) (*storagepb.SubtreeProto, error) { return nil, errors.New("not supposed to read anything") } @@ -184,7 +185,7 @@ func TestCacheFlush(t *testing.T) { //e := nodeID e.PrefixLenBits = b expectedSetIDs[e.String()] = "expected" - m.EXPECT().GetSubtree(stestonly.NodeIDEq(e)).Do(func(n storage.NodeID) { + m.EXPECT().GetSubtree(stestonly.NodeIDEq(e)).Do(func(n node.NodeID) { t.Logf("read %v", n) }).Return((*storagepb.SubtreeProto)(nil), nil) } diff --git a/storage/memory/tree_debug.go b/storage/memory/tree_debug.go index 82899c94a0..04fac1ceb6 100644 --- a/storage/memory/tree_debug.go +++ b/storage/memory/tree_debug.go @@ -17,6 +17,7 @@ package memory import ( 
"github.com/golang/glog" "github.com/google/btree" + "github.com/google/trillian/node" "github.com/google/trillian/storage" "github.com/google/trillian/storage/storagepb" ) @@ -38,7 +39,7 @@ func Dump(t *btree.BTree) { func DumpSubtrees(ls storage.LogStorage, treeID int64, callback func(string, *storagepb.SubtreeProto)) { m := ls.(*memoryLogStorage) tree := m.trees[treeID] - pi := subtreeKey(treeID, 0, storage.NewEmptyNodeID(64)) + pi := subtreeKey(treeID, 0, node.NewEmptyNodeID(64)) tree.store.AscendGreaterOrEqual(pi, func(bi btree.Item) bool { i := bi.(*kv) diff --git a/storage/memory/tree_storage.go b/storage/memory/tree_storage.go index ee9b0daba9..0f7f4ce765 100644 --- a/storage/memory/tree_storage.go +++ b/storage/memory/tree_storage.go @@ -25,6 +25,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/google/btree" "github.com/google/trillian" + "github.com/google/trillian/node" "github.com/google/trillian/storage" "github.com/google/trillian/storage/cache" "github.com/google/trillian/storage/storagepb" @@ -35,7 +36,7 @@ const degree = 8 // unseqKey formats a key for use in a tree's BTree store. // The associated Item value will be the stubtreeProto with the given nodeID // prefix. 
-func subtreeKey(treeID, rev int64, nodeID storage.NodeID) btree.Item { +func subtreeKey(treeID, rev int64, nodeID node.NodeID) btree.Item { return &kv{k: fmt.Sprintf("/%d/subtree/%s/%d", treeID, nodeID.String(), rev)} } @@ -162,8 +163,8 @@ type treeTX struct { unlock func() } -func (t *treeTX) getSubtree(ctx context.Context, treeRevision int64, nodeID storage.NodeID) (*storagepb.SubtreeProto, error) { - s, err := t.getSubtrees(ctx, treeRevision, []storage.NodeID{nodeID}) +func (t *treeTX) getSubtree(ctx context.Context, treeRevision int64, nodeID node.NodeID) (*storagepb.SubtreeProto, error) { + s, err := t.getSubtrees(ctx, treeRevision, []node.NodeID{nodeID}) if err != nil { return nil, err } @@ -177,7 +178,7 @@ func (t *treeTX) getSubtree(ctx context.Context, treeRevision int64, nodeID stor } } -func (t *treeTX) getSubtrees(ctx context.Context, treeRevision int64, nodeIDs []storage.NodeID) ([]*storagepb.SubtreeProto, error) { +func (t *treeTX) getSubtrees(ctx context.Context, treeRevision int64, nodeIDs []node.NodeID) ([]*storagepb.SubtreeProto, error) { if len(nodeIDs) == 0 { return nil, nil } @@ -228,20 +229,20 @@ func (t *treeTX) storeSubtrees(ctx context.Context, subtrees []*storagepb.Subtre // getSubtreesAtRev returns a GetSubtreesFunc which reads at the passed in rev. func (t *treeTX) getSubtreesAtRev(ctx context.Context, rev int64) cache.GetSubtreesFunc { - return func(ids []storage.NodeID) ([]*storagepb.SubtreeProto, error) { + return func(ids []node.NodeID) ([]*storagepb.SubtreeProto, error) { return t.getSubtrees(ctx, rev, ids) } } // GetMerkleNodes returns the requests nodes at (or below) the passed in treeRevision. 
-func (t *treeTX) GetMerkleNodes(ctx context.Context, treeRevision int64, nodeIDs []storage.NodeID) ([]storage.Node, error) { +func (t *treeTX) GetMerkleNodes(ctx context.Context, treeRevision int64, nodeIDs []node.NodeID) ([]storage.Node, error) { return t.subtreeCache.GetNodes(nodeIDs, t.getSubtreesAtRev(ctx, treeRevision)) } func (t *treeTX) SetMerkleNodes(ctx context.Context, nodes []storage.Node) error { for _, n := range nodes { err := t.subtreeCache.SetNodeHash(n.NodeID, n.Hash, - func(nID storage.NodeID) (*storagepb.SubtreeProto, error) { + func(nID node.NodeID) (*storagepb.SubtreeProto, error) { return t.getSubtree(ctx, t.writeRevision, nID) }) if err != nil { diff --git a/storage/mock_storage.go b/storage/mock_storage.go index 08290ef1fb..b79c210fa0 100644 --- a/storage/mock_storage.go +++ b/storage/mock_storage.go @@ -5,10 +5,11 @@ package storage import ( context "context" - gomock "github.com/golang/mock/gomock" - trillian "github.com/google/trillian" reflect "reflect" time "time" + + gomock "github.com/golang/mock/gomock" + trillian "github.com/google/trillian" ) // MockAdminStorage is a mock of AdminStorage interface diff --git a/storage/mysql/storage_test.go b/storage/mysql/storage_test.go index 0a6c251538..3ef26a1b87 100644 --- a/storage/mysql/storage_test.go +++ b/storage/mysql/storage_test.go @@ -29,6 +29,7 @@ import ( "github.com/google/trillian" "github.com/google/trillian/merkle" "github.com/google/trillian/merkle/rfc6962" + "github.com/google/trillian/node" "github.com/google/trillian/storage" storageto "github.com/google/trillian/storage/testonly" ) @@ -42,7 +43,7 @@ func TestNodeRoundTrip(t *testing.T) { const writeRevision = int64(100) nodesToStore := createSomeNodes() - nodeIDsToRead := make([]storage.NodeID, len(nodesToStore)) + nodeIDsToRead := make([]node.NodeID, len(nodesToStore)) for i := range nodesToStore { nodeIDsToRead[i] = nodesToStore[i].NodeID } @@ -93,7 +94,7 @@ func TestLogNodeRoundTripMultiSubtree(t *testing.T) { if err != 
nil { t.Fatalf("failed to create test tree: %v", err) } - nodeIDsToRead := make([]storage.NodeID, len(nodesToStore)) + nodeIDsToRead := make([]node.NodeID, len(nodesToStore)) for i := range nodesToStore { nodeIDsToRead[i] = nodesToStore[i].NodeID } diff --git a/storage/mysql/tree_storage.go b/storage/mysql/tree_storage.go index f08e7e710c..743efd0d8a 100644 --- a/storage/mysql/tree_storage.go +++ b/storage/mysql/tree_storage.go @@ -25,6 +25,7 @@ import ( "github.com/golang/glog" "github.com/golang/protobuf/proto" + "github.com/google/trillian/node" "github.com/google/trillian/storage" "github.com/google/trillian/storage/cache" "github.com/google/trillian/storage/storagepb" @@ -168,8 +169,8 @@ type treeTX struct { writeRevision int64 } -func (t *treeTX) getSubtree(ctx context.Context, treeRevision int64, nodeID storage.NodeID) (*storagepb.SubtreeProto, error) { - s, err := t.getSubtrees(ctx, treeRevision, []storage.NodeID{nodeID}) +func (t *treeTX) getSubtree(ctx context.Context, treeRevision int64, nodeID node.NodeID) (*storagepb.SubtreeProto, error) { + s, err := t.getSubtrees(ctx, treeRevision, []node.NodeID{nodeID}) if err != nil { return nil, err } @@ -183,7 +184,7 @@ func (t *treeTX) getSubtree(ctx context.Context, treeRevision int64, nodeID stor } } -func (t *treeTX) getSubtrees(ctx context.Context, treeRevision int64, nodeIDs []storage.NodeID) ([]*storagepb.SubtreeProto, error) { +func (t *treeTX) getSubtrees(ctx context.Context, treeRevision int64, nodeIDs []node.NodeID) ([]*storagepb.SubtreeProto, error) { glog.V(4).Infof("getSubtrees(") if len(nodeIDs) == 0 { return nil, nil @@ -359,20 +360,20 @@ func (t *treeTX) GetTreeRevisionIncludingSize(ctx context.Context, treeSize int6 // getSubtreesAtRev returns a GetSubtreesFunc which reads at the passed in rev. 
func (t *treeTX) getSubtreesAtRev(ctx context.Context, rev int64) cache.GetSubtreesFunc { - return func(ids []storage.NodeID) ([]*storagepb.SubtreeProto, error) { + return func(ids []node.NodeID) ([]*storagepb.SubtreeProto, error) { return t.getSubtrees(ctx, rev, ids) } } // GetMerkleNodes returns the requests nodes at (or below) the passed in treeRevision. -func (t *treeTX) GetMerkleNodes(ctx context.Context, treeRevision int64, nodeIDs []storage.NodeID) ([]storage.Node, error) { +func (t *treeTX) GetMerkleNodes(ctx context.Context, treeRevision int64, nodeIDs []node.NodeID) ([]storage.Node, error) { return t.subtreeCache.GetNodes(nodeIDs, t.getSubtreesAtRev(ctx, treeRevision)) } func (t *treeTX) SetMerkleNodes(ctx context.Context, nodes []storage.Node) error { for _, n := range nodes { err := t.subtreeCache.SetNodeHash(n.NodeID, n.Hash, - func(nID storage.NodeID) (*storagepb.SubtreeProto, error) { + func(nID node.NodeID) (*storagepb.SubtreeProto, error) { return t.getSubtree(ctx, t.writeRevision, nID) }) if err != nil { diff --git a/storage/testonly/fake_node_reader.go b/storage/testonly/fake_node_reader.go index 22692baae8..be7fe6dd2c 100644 --- a/storage/testonly/fake_node_reader.go +++ b/storage/testonly/fake_node_reader.go @@ -22,6 +22,7 @@ import ( "github.com/golang/glog" "github.com/google/trillian/merkle" "github.com/google/trillian/merkle/rfc6962" + "github.com/google/trillian/node" "github.com/google/trillian/storage" ) @@ -33,7 +34,7 @@ import ( // NodeMapping is a struct we use because we can't use NodeIDs as map keys. Callers pass this // and FakeNodeReader internally manages derived keys. type NodeMapping struct { - NodeID storage.NodeID + NodeID node.NodeID Node storage.Node } @@ -76,7 +77,7 @@ func (f FakeNodeReader) GetTreeRevisionIncludingSize(treeSize int64) (int64, err } // GetMerkleNodes implements the corresponding NodeReader API. 
-func (f FakeNodeReader) GetMerkleNodes(treeRevision int64, NodeIDs []storage.NodeID) ([]storage.Node, error) { +func (f FakeNodeReader) GetMerkleNodes(treeRevision int64, NodeIDs []node.NodeID) ([]storage.Node, error) { if f.treeRevision > treeRevision { return nil, fmt.Errorf("GetMerkleNodes() got treeRevision:%d, want up to: %d", treeRevision, f.treeRevision) } @@ -95,7 +96,7 @@ func (f FakeNodeReader) GetMerkleNodes(treeRevision int64, NodeIDs []storage.Nod return nodes, nil } -func (f FakeNodeReader) hasID(nodeID storage.NodeID) bool { +func (f FakeNodeReader) hasID(nodeID node.NodeID) bool { _, ok := f.nodeMap[nodeID.String()] return ok } @@ -174,7 +175,7 @@ func NewMultiFakeNodeReaderFromLeaves(batches []LeafBatch) *MultiFakeNodeReader return NewMultiFakeNodeReader(readers) } -func (m MultiFakeNodeReader) readerForNodeID(nodeID storage.NodeID, revision int64) *FakeNodeReader { +func (m MultiFakeNodeReader) readerForNodeID(nodeID node.NodeID, revision int64) *FakeNodeReader { // Work backwards and use the first reader where the node is present and the revision is in range for i := len(m.readers) - 1; i >= 0; i-- { if m.readers[i].treeRevision <= revision && m.readers[i].hasID(nodeID) { @@ -197,7 +198,7 @@ func (m MultiFakeNodeReader) GetTreeRevisionIncludingSize(treeSize int64) (int64 } // GetMerkleNodes implements the corresponding NodeReader API. -func (m MultiFakeNodeReader) GetMerkleNodes(ctx context.Context, treeRevision int64, NodeIDs []storage.NodeID) ([]storage.Node, error) { +func (m MultiFakeNodeReader) GetMerkleNodes(ctx context.Context, treeRevision int64, NodeIDs []node.NodeID) ([]storage.Node, error) { // Find the correct reader for the supplied tree revision. 
This must be done for each node // as earlier revisions may still be relevant nodes := make([]storage.Node, 0, len(NodeIDs)) @@ -209,7 +210,7 @@ func (m MultiFakeNodeReader) GetMerkleNodes(ctx context.Context, treeRevision in fmt.Errorf("want nodeID: %v with revision <= %d but no reader has it\n%v", nID, treeRevision, m) } - node, err := reader.GetMerkleNodes(treeRevision, []storage.NodeID{nID}) + node, err := reader.GetMerkleNodes(treeRevision, []node.NodeID{nID}) if err != nil { return nil, err } diff --git a/storage/testonly/matchers.go b/storage/testonly/matchers.go index b10fa5e2a9..a2b5b31f72 100644 --- a/storage/testonly/matchers.go +++ b/storage/testonly/matchers.go @@ -20,12 +20,13 @@ import ( "sort" "github.com/golang/mock/gomock" + "github.com/google/trillian/node" "github.com/google/trillian/storage" "github.com/google/trillian/storage/storagepb" ) type subtreeHasPrefix struct { - expectedID storage.NodeID + expectedID node.NodeID } func (s subtreeHasPrefix) Matches(x interface{}) bool { @@ -42,11 +43,11 @@ func (s subtreeHasPrefix) String() string { } type nodeIDEq struct { - expectedID storage.NodeID + expectedID node.NodeID } func (m nodeIDEq) Matches(x interface{}) bool { - n, ok := x.(storage.NodeID) + n, ok := x.(node.NodeID) if !ok { return false } @@ -58,7 +59,7 @@ func (m nodeIDEq) String() string { } // NodeIDEq returns a matcher that expects the specified NodeID. -func NodeIDEq(n storage.NodeID) gomock.Matcher { +func NodeIDEq(n node.NodeID) gomock.Matcher { return nodeIDEq{n} } diff --git a/storage/testonly/nodes.go b/storage/testonly/nodes.go index f6d9433d13..84ed95e2ba 100644 --- a/storage/testonly/nodes.go +++ b/storage/testonly/nodes.go @@ -15,13 +15,11 @@ // Package testonly holds test-specific code for Trillian storage layers. package testonly -import ( - "github.com/google/trillian/storage" -) +import "github.com/google/trillian/node" // MustCreateNodeIDForTreeCoords creates a NodeID for the given position in the tree. 
-func MustCreateNodeIDForTreeCoords(depth, index int64, maxPathBits int) storage.NodeID { - n, err := storage.NewNodeIDForTreeCoords(depth, index, maxPathBits) +func MustCreateNodeIDForTreeCoords(depth, index int64, maxPathBits int) node.NodeID { + n, err := node.NewNodeIDForTreeCoords(depth, index, maxPathBits) if err != nil { panic(err) } diff --git a/storage/tools/dump_tree/dumplib/dumplib.go b/storage/tools/dump_tree/dumplib/dumplib.go index eeeda215f8..726af7b560 100644 --- a/storage/tools/dump_tree/dumplib/dumplib.go +++ b/storage/tools/dump_tree/dumplib/dumplib.go @@ -42,6 +42,7 @@ import ( "github.com/google/trillian/merkle/hashers" "github.com/google/trillian/merkle/rfc6962" "github.com/google/trillian/monitoring" + "github.com/google/trillian/node" "github.com/google/trillian/quota" "github.com/google/trillian/storage" "github.com/google/trillian/storage/cache" @@ -422,15 +423,15 @@ func traverseTreeStorage(ls storage.LogStorage, treeID int64, ts int, rev int64) } for level := int64(0); level < levels; level++ { - for node := int64(0); node < nodesAtLevel; node++ { + for index := int64(0); index < nodesAtLevel; index++ { // We're going to request one node at a time, which would normally be slow but we have // the tree in RAM so it's not a real problem. 
- nodeID, err := storage.NewNodeIDForTreeCoords(level, node, 64) + nodeID, err := node.NewNodeIDForTreeCoords(level, index, 64) if err != nil { - glog.Fatalf("NewNodeIDForTreeCoords: (%d, %d): got: %v, want: no err", level, node, err) + glog.Fatalf("NewNodeIDForTreeCoords: (%d, %d): got: %v, want: no err", level, index, err) } - nodes, err := tx.GetMerkleNodes(context.TODO(), rev, []storage.NodeID{nodeID}) + nodes, err := tx.GetMerkleNodes(context.TODO(), rev, []node.NodeID{nodeID}) if err != nil { glog.Fatalf("GetMerkleNodes: %s: %v", nodeID.CoordString(), err) } @@ -438,7 +439,7 @@ func traverseTreeStorage(ls storage.LogStorage, treeID int64, ts int, rev int64) glog.Fatalf("GetMerkleNodes: %s: want 1 node got: %v", nodeID.CoordString(), nodes) } - fmt.Fprintf(out, "%6d %6d -> %s\n", level, node, hex.EncodeToString(nodes[0].Hash)) + fmt.Fprintf(out, "%6d %6d -> %s\n", level, index, hex.EncodeToString(nodes[0].Hash)) } nodesAtLevel = nodesAtLevel >> 1 diff --git a/storage/types.go b/storage/types.go index c60f2ea39c..04a6234f04 100644 --- a/storage/types.go +++ b/storage/types.go @@ -15,12 +15,9 @@ package storage import ( - "bytes" - "encoding/binary" "fmt" - "math/big" - "github.com/golang/glog" + "github.com/google/trillian/node" "github.com/google/trillian/storage/storagepb" ) @@ -44,292 +41,11 @@ func (s Error) Error() string { // Node represents a single node in a Merkle tree. type Node struct { - NodeID NodeID + NodeID node.NodeID Hash []byte NodeRevision int64 } -// NodeID uniquely identifies a Node within a versioned MerkleTree. -type NodeID struct { - // path is effectively a BigEndian bit set, with path[0] being the MSB - // (identifying the root child), and successive bits identifying the lower - // level children down to the leaf. - Path []byte - // PrefixLenBits is the number of MSB in Path which are considered part of - // this NodeID. - // - // e.g. 
if Path contains two bytes, and PrefixLenBits is 9, then the 8 bits - // in Path[0] are included, along with the lowest bit of Path[1] - PrefixLenBits int -} - -// PathLenBits returns 8 * len(path). -func (n NodeID) PathLenBits() int { - return len(n.Path) * 8 -} - -// bytesForBits returns the number of bytes required to store numBits bits. -func bytesForBits(numBits int) int { - return (numBits + 7) >> 3 -} - -// NewNodeIDFromHash creates a new NodeID for the given Hash. -func NewNodeIDFromHash(h []byte) NodeID { - return NodeID{ - Path: h, - PrefixLenBits: len(h) * 8, - } -} - -// NewEmptyNodeID creates a new zero-length NodeID with sufficient underlying -// capacity to store a maximum of maxLenBits. -func NewEmptyNodeID(maxLenBits int) NodeID { - if got, want := maxLenBits%8, 0; got != want { - panic(fmt.Sprintf("storeage: NewEmptyNodeID() maxLenBits mod 8: %v, want %v", got, want)) - } - return NodeID{ - Path: make([]byte, maxLenBits/8), - PrefixLenBits: 0, - } -} - -// NewNodeIDFromPrefix returns a nodeID for a particular node within a subtree. -// Prefix is the prefix of the subtree. -// depth is the depth of index from the root of the subtree. -// index is the horizontal location of the subtree leaf. -// subDepth is the total number of levels in the subtree. -// totalDepth is the number of levels in the whole tree. -func NewNodeIDFromPrefix(prefix []byte, depth int, index int64, subDepth, totalDepth int) NodeID { - if got, want := totalDepth%8, 0; got != want || got < want { - panic(fmt.Sprintf("storage NewNodeFromPrefix(): totalDepth mod 8: %v, want %v", got, want)) - } - if got, want := subDepth%8, 0; got != want || got < want { - panic(fmt.Sprintf("storage NewNodeFromPrefix(): subDepth mod 8: %v, want %v", got, want)) - } - if got, want := depth, 0; got < want { - panic(fmt.Sprintf("storage NewNodeFromPrefix(): depth: %v, want >= %v", got, want)) - } - - // Put prefix in the MSB bits of path. 
- path := make([]byte, totalDepth/8) - copy(path, prefix) - - // Convert index into absolute coordinates for subtree. - height := subDepth - depth - subIndex := index << uint(height) // index is the horizontal index at the given height. - - // Copy subDepth/8 bytes of subIndex into path. - subPath := new(bytes.Buffer) - binary.Write(subPath, binary.BigEndian, uint64(subIndex)) - unusedHighBytes := 64/8 - subDepth/8 - copy(path[len(prefix):], subPath.Bytes()[unusedHighBytes:]) - - return NodeID{ - Path: path, - PrefixLenBits: len(prefix)*8 + depth, - } -} - -// NewNodeIDFromRelativeBigInt returns a NodeID given by a subtree and a subtree index. -// depth is the number of levels down from the top of the subtree -// subIndex is the path from the root of the subtree to the desired node, and continuing down to the bottom of the subtree. -// subIndex = horizontal index << height. -func NewNodeIDFromRelativeBigInt(prefix []byte, subtreeDepth, depth int, subIndex *big.Int, totalDepth int) NodeID { - // Put prefix in the MSB bits of path. - path := make([]byte, totalDepth/8) - copy(path, prefix) - - // Copy subIndex into subPath, right justified. - subPath := path[len(prefix) : len(prefix)+subtreeDepth/8] - unusedSubBytes := len(subPath) - len(subIndex.Bytes()) - copy(subPath[unusedSubBytes:], subIndex.Bytes()) - - glog.V(5).Infof("NewNodeIDFromRelativeBigInt({%x, %v}, %v, %x, %v): %v, %x", - prefix, subtreeDepth, depth, subIndex.Bytes(), totalDepth, len(prefix)*8+depth, path) - return NodeID{ - Path: path, - PrefixLenBits: len(prefix)*8 + depth, - } -} - -// NewNodeIDFromBigInt returns a NodeID of a big.Int with no prefix. -// index contains the path's least significant bits. -// depth indicates the number of bits from the most significant bit to treat as part of the path. 
-func NewNodeIDFromBigInt(depth int, index *big.Int, totalDepth int) NodeID { - if got, want := totalDepth%8, 0; got != want || got < want { - panic(fmt.Sprintf("storage NewNodeFromBitInt(): totalDepth mod 8: %v, want %v", got, want)) - } - - // Put index in the LSB bits of path. - path := make([]byte, totalDepth/8) - unusedHighBytes := len(path) - len(index.Bytes()) - copy(path[unusedHighBytes:], index.Bytes()) - - // TODO(gdbelvin): consider masking off insignificant bits past depth. - - return NodeID{ - Path: path, - PrefixLenBits: depth, - } -} - -// NewNodeIDWithPrefix creates a new NodeID of nodeIDLen bits with the prefixLen MSBs set to prefix. -// NewNodeIDWithPrefix places the lower prefixLenBits of prefix in the most significant bits of path. -// Path will have enough bytes to hold maxLenBits -// -func NewNodeIDWithPrefix(prefix uint64, prefixLenBits, nodeIDLenBits, maxLenBits int) NodeID { - if got, want := nodeIDLenBits%8, 0; got != want { - panic(fmt.Sprintf("nodeIDLenBits mod 8: %v, want %v", got, want)) - } - maxLenBytes := bytesForBits(maxLenBits) - p := NodeID{ - Path: make([]byte, maxLenBytes), - PrefixLenBits: nodeIDLenBits, - } - - bit := maxLenBits - prefixLenBits - for i := 0; i < prefixLenBits; i++ { - if prefix&1 != 0 { - p.SetBit(bit, 1) - } - bit++ - prefix >>= 1 - } - return p -} - -func bitLen(x int64) int { - r := 0 - for x > 0 { - r++ - x >>= 1 - } - return r -} - -// NewNodeIDForTreeCoords creates a new NodeID for a Tree node with a specified depth and -// index. -// This method is used exclusively by the Log, and, since the Log model grows upwards from the -// leaves, we modify the provided coords accordingly. -// -// depth is the Merkle tree level: 0 = leaves, and increases upwards towards the root. -// -// index is the horizontal index into the tree at level depth, so the returned -// NodeID will be zero padded on the right by depth places. 
-func NewNodeIDForTreeCoords(depth int64, index int64, maxPathBits int) (NodeID, error) { - bl := bitLen(index) - if index < 0 || depth < 0 || - bl > int(maxPathBits-int(depth)) || - maxPathBits%8 != 0 { - return NodeID{}, fmt.Errorf("depth/index combination out of range: depth=%d index=%d maxPathBits=%v", depth, index, maxPathBits) - } - // This node is effectively a prefix of the subtree underneath (for non-leaf - // depths), so we shift the index accordingly. - uidx := uint64(index) << uint(depth) - r := NewEmptyNodeID(maxPathBits) - for i := len(r.Path) - 1; uidx > 0 && i >= 0; i-- { - r.Path[i] = byte(uidx & 0xff) - uidx >>= 8 - } - // In the storage model nodes closer to the leaves have longer nodeIDs, so - // we "reverse" depth here: - r.PrefixLenBits = int(maxPathBits - int(depth)) - return r, nil -} - -// SetBit sets the ith bit to true if b is non-zero, and false otherwise. -func (n *NodeID) SetBit(i int, b uint) { - // TODO(al): investigate whether having lookup tables for these might be - // faster. - bIndex := (n.PathLenBits() - i - 1) / 8 - if b == 0 { - n.Path[bIndex] &= ^(1 << uint(i%8)) - } else { - n.Path[bIndex] |= (1 << uint(i%8)) - } -} - -// Bit returns 1 if the ith bit is true, and false otherwise. -func (n *NodeID) Bit(i int) uint { - if got, want := i, n.PathLenBits()-1; got > want { - panic(fmt.Sprintf("storage: Bit(%v) > (PathLenBits() -1): %v", got, want)) - } - bIndex := (n.PathLenBits() - i - 1) / 8 - return uint((n.Path[bIndex] >> uint(i%8)) & 0x01) -} - -// String returns a string representation of the binary value of the NodeID. -// The left-most bit is the MSB (i.e. nearer the root of the tree). -func (n *NodeID) String() string { - var r bytes.Buffer - limit := n.PathLenBits() - n.PrefixLenBits - for i := n.PathLenBits() - 1; i >= limit; i-- { - r.WriteRune(rune('0' + n.Bit(i))) - } - return r.String() -} - -// CoordString returns a string representation assuming that the NodeID represents a -// tree coordinate. 
Using this on a NodeID for a sparse Merkle tree will give incorrect -// results. Intended for debugging purposes, the format could change. -func (n *NodeID) CoordString() string { - d := uint64(n.PathLenBits() - n.PrefixLenBits) - i := uint64(0) - for _, p := range n.Path { - i = (i << uint64(8)) + uint64(p) - } - - return fmt.Sprintf("[d:%d, i:%d]", d, i>>d) -} - -// Siblings returns the siblings of the given node. -func (n *NodeID) Siblings() []NodeID { - r := make([]NodeID, n.PrefixLenBits, n.PrefixLenBits) - l := n.PrefixLenBits - // Index of the bit to twiddle: - bi := n.PathLenBits() - n.PrefixLenBits - for i := 0; i < len(r); i++ { - r[i].PrefixLenBits = l - i - r[i].Path = make([]byte, len(n.Path)) - copy(r[i].Path, n.Path) - r[i].SetBit(bi, n.Bit(bi)^1) - bi++ - } - return r -} - -// Split splits a NodeID into a prefix and a suffix at prefixSplit -func (n *NodeID) Split(prefixBytes, suffixBits int) ([]byte, Suffix) { - if n.PrefixLenBits == 0 { - return []byte{}, Suffix{Bits: 0, Path: []byte{0}} - } - a := make([]byte, len(n.Path)) - copy(a, n.Path) - - bits := n.PrefixLenBits - prefixBytes*8 - if bits > suffixBits { - panic(fmt.Sprintf("storage Split: %x(n.PrefixLenBits: %v - prefixBytes: %v *8) > %v", n.Path, n.PrefixLenBits, prefixBytes, suffixBits)) - } - if bits == 0 { - panic(fmt.Sprintf("storage Split: %x(n.PrefixLenBits: %v - prefixBytes: %v *8) == 0", n.Path, n.PrefixLenBits, prefixBytes)) - } - suffixBytes := bytesForBits(bits) - sfx := Suffix{ - Bits: byte(bits), - Path: a[prefixBytes : prefixBytes+suffixBytes], - } - maskIndex := (bits - 1) / 8 - maskLowBits := (sfx.Bits-1)%8 + 1 - sfx.Path[maskIndex] &= ((0x01 << maskLowBits) - 1) << uint(8-maskLowBits) - - return a[:prefixBytes], sfx -} - -// Equivalent return true iff the other represents the same path prefix as this NodeID. 
-func (n *NodeID) Equivalent(other NodeID) bool { - return n.String() == other.String() -} - // PopulateSubtreeFunc is a function which knows how to re-populate a subtree // from just its leaf nodes. type PopulateSubtreeFunc func(*storagepb.SubtreeProto) error