Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 26 additions & 1 deletion cache/remotecache/v1/cachestorage.go
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,12 @@ func (cs *cacheKeyStorage) Exists(id string) bool {
return ok
}

func (cs *cacheKeyStorage) Walk(func(id string) error) error {
func (cs *cacheKeyStorage) Walk(cb func(id string) error) error {
for id := range cs.byID {
if err := cb(id); err != nil {
return err
}
}
return nil
}

Expand Down Expand Up @@ -142,6 +147,26 @@ func (cs *cacheKeyStorage) Release(resultID string) error {
func (cs *cacheKeyStorage) AddLink(id string, link solver.CacheInfoLink, target string) error {
return nil
}

// WalkLinksAll calls fn once per outgoing link of the cache key with the
// given id. Each link is reported with its target key ID and the
// solver.CacheInfoLink metadata (input index, selector, digest) it is
// stored under. An unknown id is not an error: the walk is a no-op.
func (cs *cacheKeyStorage) WalkLinksAll(id string, fn func(id string, link solver.CacheInfoLink) error) error {
	rec, ok := cs.byID[id]
	if !ok {
		// Nothing to walk for an unknown key.
		return nil
	}
	for key, targets := range rec.links {
		// The link metadata is identical for every target under the same
		// key, so build it once per key rather than per target.
		info := solver.CacheInfoLink{
			Input:    solver.Index(key.input),
			Selector: digest.Digest(key.selector),
			Digest:   key.dgst,
		}
		for _, target := range targets {
			if err := fn(target, info); err != nil {
				return err
			}
		}
	}
	return nil
}

func (cs *cacheKeyStorage) WalkLinks(id string, link solver.CacheInfoLink, fn func(id string) error) error {
it, ok := cs.byID[id]
if !ok {
Expand Down
97 changes: 78 additions & 19 deletions cmd/buildkitd/debug.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,9 @@
package main

import (
"cmp"
"context"
"encoding/binary"
"encoding/json"
"expvar"
"fmt"
Expand All @@ -15,6 +17,7 @@ import (
"strings"
"time"

cacheimport "github.com/moby/buildkit/cache/remotecache/v1"
"github.com/moby/buildkit/solver"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/cachedigest"
Expand All @@ -40,6 +43,7 @@ func setupDebugHandlers(addr string) error {
m.Handle("/debug/cache/all", http.HandlerFunc(handleCacheAll))
m.Handle("/debug/cache/lookup", http.HandlerFunc(handleCacheLookup))
m.Handle("/debug/cache/store", http.HandlerFunc(handleDebugCacheStore))
m.Handle("POST /debug/cache/load", http.HandlerFunc(handleCacheLoad))
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Wonder if we could have a /debug/cache/import endpoint that could use the same CSV attributes as cache-from to make this easier? Like type=registry,ref=foo/bar:cache

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This can't be easily done like this as things like authentication require buildkit features like grpc session tunnel and this is a completely different debug API endpoint. I think the way UX can work eventually is that something like buildx can fetch the config on the client side and then upload to the debug endpoint.


m.Handle("/debug/gc", http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
runtime.GC()
Expand Down Expand Up @@ -139,9 +143,13 @@ func printCacheRecord(record *cachedigest.Record, w io.Writer) {
case cachedigest.FrameIDData:
w.Write([]byte(" " + frame.ID.String() + ": " + string(frame.Data) + "\n"))
case cachedigest.FrameIDSkip:
w.Write([]byte(" skipping " + string(frame.Data) + " bytes\n"))
fmt.Fprintf(w, " skipping %d bytes\n", binary.LittleEndian.Uint32(frame.Data))
}
}
for _, subRec := range record.SubRecords {
w.Write([]byte("\n"))
printCacheRecord(subRec, w)
}
}

func cacheRecordLookup(ctx context.Context, dgst digest.Digest) (*cachedigest.Record, error) {
Expand Down Expand Up @@ -216,18 +224,70 @@ func loadCacheAll(ctx context.Context) ([]*cachedigest.Record, error) {
return records, nil
}

// handleCacheLoad accepts a POST request whose body is an exported cache
// manifest (remotecache v1 format), parses it into an in-memory cache key
// storage, and writes the resulting debug records back to the client via
// writeCacheRecordsResponse.
func handleCacheLoad(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	if r.Body == nil {
		http.Error(w, "body is required", http.StatusBadRequest)
		return
	}
	defer r.Body.Close()

	records, err := loadCacheFromReader(r.Context(), r.Body)
	if err != nil {
		http.Error(w, "failed to load cache: "+err.Error(), http.StatusInternalServerError)
		return
	}
	writeCacheRecordsResponse(w, r, records)
}

// loadCacheFromReader parses an exported cache manifest read from rdr and
// converts it into debug cache records: the manifest is decoded with the
// remotecache v1 parser, turned into an in-memory cache key storage, and
// then walked by debugCacheStore.
func loadCacheFromReader(ctx context.Context, rdr io.Reader) ([]*recordWithDebug, error) {
	payload, err := io.ReadAll(rdr)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read body")
	}

	// Parse the manifest into cache chains; layer descriptors are collected
	// into the provider as a side effect of parsing.
	chains := cacheimport.NewCacheChains()
	layers := cacheimport.DescriptorProvider{}
	if err := cacheimport.Parse(payload, layers, chains); err != nil {
		return nil, err
	}

	storage, _, err := cacheimport.NewCacheKeyStorage(chains, nil)
	if err != nil {
		return nil, err
	}

	records, err := debugCacheStore(ctx, storage)
	if err != nil {
		return nil, errors.Wrap(err, "failed to debug cache store")
	}
	return records, nil
}

func handleDebugCacheStore(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}

recs, err := debugCacheStore(r.Context())
store := cacheStoreForDebug
if store == nil {
http.Error(w, "Cache store is not initialized for debug", http.StatusInternalServerError)
}

recs, err := debugCacheStore(r.Context(), store)
if err != nil {
http.Error(w, "Failed to debug cache store: "+err.Error(), http.StatusInternalServerError)
return
}
writeCacheRecordsResponse(w, r, recs)
}

func writeCacheRecordsResponse(w http.ResponseWriter, r *http.Request, recs []*recordWithDebug) {
w.WriteHeader(http.StatusOK)

switch r.Header.Get("Accept") {
Expand All @@ -250,24 +310,28 @@ func handleDebugCacheStore(w http.ResponseWriter, r *http.Request) {
if rec.Digest != "" {
fmt.Fprintf(w, "Digest: %s\n", rec.Digest)
}

if len(rec.Parents) > 0 {
fmt.Fprintln(w, "Parents:")
for input := range rec.Parents {
ids := slices.Collect(maps.Keys(rec.ParentIDs[input]))
s := make([]string, len(ids))
for i, id := range ids {
s[i] = fmt.Sprintf("%d", id)
slices.SortStableFunc(rec.Parents, func(i, j cachestore.Link) int {
return cmp.Or(cmp.Compare(i.Input, j.Input), cmp.Compare(i.Digest, j.Digest))
})
for _, parent := range rec.Parents {
fmt.Fprintf(w, " Input %d:\t%d\t%s\n", parent.Input, parent.Record.ID, parent.Digest)
if parent.Selector != "" {
fmt.Fprintf(w, " Selector: %s\n", parent.Selector)
}
fmt.Fprintf(w, " Input %d:\t %s\n", input, strings.Join(s, ", "))
}
}
if len(rec.Children) > 0 {
fmt.Fprintln(w, "Children:")
for _, child := range rec.Children {
fmt.Fprintf(w, " %d %s (input %d, output %d)\n", child.Record.ID, child.Digest, child.Input, child.Output)
if child.Selector != "" {
fmt.Fprintf(w, " Selector: %s\n", child.Selector)
for input := range rec.Children {
ids := slices.Collect(maps.Keys(rec.ChildIDs[input]))
s := make([]string, len(ids))
for i, id := range ids {
s[i] = fmt.Sprintf("%d", id)
}
fmt.Fprintf(w, " Input %d:\t %s\n", input, strings.Join(s, ", "))
}
}
if len(rec.Debug) > 0 {
Expand All @@ -287,12 +351,7 @@ type recordWithDebug struct {
Debug []*cachedigest.Record `json:"debug,omitempty"`
}

func debugCacheStore(ctx context.Context) ([]*recordWithDebug, error) {
store := cacheStoreForDebug
if store == nil {
return nil, errors.New("cache store is not initialized for debug")
}

func debugCacheStore(ctx context.Context, store solver.CacheKeyStorage) ([]*recordWithDebug, error) {
recs, err := cachestore.Records(ctx, store)
if err != nil {
return nil, errors.Wrap(err, "failed to get cache records")
Expand All @@ -307,7 +366,7 @@ func debugCacheStore(ctx context.Context) ([]*recordWithDebug, error) {
if rec.Digest != "" {
m[rec.Digest] = nil
}
for _, link := range rec.Children {
for _, link := range rec.Parents {
m[link.Digest] = nil
if link.Selector != "" {
m[link.Selector] = nil
Expand Down
2 changes: 2 additions & 0 deletions solver/bboltcachestorage/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -366,6 +366,8 @@ func (s *Store) WalkLinksAll(id string, fn func(id string, link solver.CacheInfo
if err := json.Unmarshal(parts[0], &link); err != nil {
return err
}
// make digest relative to output as not all backends store output separately
link.Digest = digest.FromBytes(fmt.Appendf(nil, "%s@%d", link.Digest, link.Output))
links = append(links, linkEntry{
id: string(parts[1]),
link: link,
Expand Down
2 changes: 1 addition & 1 deletion util/cachedigest/db_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ func TestNewHashAndGet(t *testing.T) {
dataFrames = append(dataFrames, f.Data)
case FrameIDSkip:
require.Len(t, f.Data, 4)
skipLens = append(skipLens, uint32(f.Data[0])<<24|uint32(f.Data[1])<<16|uint32(f.Data[2])<<8|uint32(f.Data[3]))
skipLens = append(skipLens, uint32(f.Data[3])<<24|uint32(f.Data[2])<<16|uint32(f.Data[1])<<8|uint32(f.Data[0]))
}
}
require.Len(t, dataFrames, len(inputs))
Expand Down
17 changes: 11 additions & 6 deletions util/cachedigest/digest.go
Original file line number Diff line number Diff line change
Expand Up @@ -67,11 +67,11 @@ func (h *Hash) WriteNoDebug(p []byte) (n int, err error) {
if n > 0 && h.db != nil {
if len(h.frames) > 0 && h.frames[len(h.frames)-1].ID == FrameIDSkip {
last := &h.frames[len(h.frames)-1]
prevLen := binary.BigEndian.Uint32(last.Data)
binary.BigEndian.PutUint32(last.Data, prevLen+uint32(n))
prevLen := binary.LittleEndian.Uint32(last.Data)
binary.LittleEndian.PutUint32(last.Data, prevLen+uint32(n))
} else {
lenBytes := make([]byte, 4)
binary.BigEndian.PutUint32(lenBytes, uint32(n))
binary.LittleEndian.PutUint32(lenBytes, uint32(n))
h.frames = append(h.frames, Frame{ID: FrameIDSkip, Data: lenBytes})
}
}
Expand All @@ -94,7 +94,7 @@ type Record struct {
Digest digest.Digest `json:"digest"`
Type Type `json:"type"`
Data []Frame `json:"data,omitempty"`
SubRecords []Record `json:"subRecords,omitempty"`
SubRecords []*Record `json:"subRecords,omitempty"`
}

var shaRegexpOnce = sync.OnceValue(func() *regexp.Regexp {
Expand Down Expand Up @@ -149,11 +149,16 @@ func (r *Record) LoadSubRecords(loader func(d digest.Digest) (Type, []Frame, err
bklog.L.Warnf("failed to load sub-record for %s: %v", dgst, err)
continue
}
r.SubRecords = append(r.SubRecords, Record{
rr := &Record{
Digest: digest.Digest(dgst),
Type: typ,
Data: frames,
})
}
if err := rr.LoadSubRecords(loader); err != nil {
return err
}

r.SubRecords = append(r.SubRecords, rr)
}
return nil
}
59 changes: 34 additions & 25 deletions util/cachestore/store.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,10 @@
package cachestore

import (
"cmp"
"context"
"maps"
"slices"
"strings"

"github.com/moby/buildkit/solver"
Expand All @@ -10,12 +13,12 @@ import (
)

type Record struct {
ID int `json:"id"`
Parents map[int]map[*Record]struct{} `json:"-"`
Children []Link `json:"children,omitempty"`
Digest digest.Digest `json:"digest,omitempty"`
Random bool `json:"random,omitempty"`
ParentIDs map[int]map[int]struct{} `json:"parents,omitempty"`
ID int `json:"id"`
Parents []Link `json:"parents,omitempty"`
Children map[int]map[*Record]struct{} `json:"-"`
Digest digest.Digest `json:"digest,omitempty"`
Random bool `json:"random,omitempty"`
ChildIDs map[int]map[int]struct{} `json:"children,omitempty"`
}

type Link struct {
Expand All @@ -35,7 +38,7 @@ type storeWithLinks interface {
func Records(ctx context.Context, store solver.CacheKeyStorage) ([]*Record, error) {
swl, ok := store.(storeWithLinks)
if !ok {
return nil, errors.New("cache store does not support walkin all links")
return nil, errors.New("cache store does not support walking all links")
}

roots := []string{}
Expand Down Expand Up @@ -72,16 +75,16 @@ func Records(ctx context.Context, store solver.CacheKeyStorage) ([]*Record, erro
}

func setLinkIDs(rec *Record) {
for i, child := range rec.Children {
child.ID = child.Record.ID
rec.Children[i] = child
for i, parent := range rec.Parents {
parent.ID = parent.Record.ID
rec.Parents[i] = parent
}
if rec.Parents != nil {
rec.ParentIDs = make(map[int]map[int]struct{})
for input, m := range rec.Parents {
rec.ParentIDs[input] = make(map[int]struct{})
for parent := range m {
rec.ParentIDs[input][parent.ID] = struct{}{}
if rec.Children != nil {
rec.ChildIDs = make(map[int]map[int]struct{})
for input, m := range rec.Children {
rec.ChildIDs[input] = make(map[int]struct{})
for child := range m {
rec.ChildIDs[input][child.ID] = struct{}{}
}
}
}
Expand All @@ -93,8 +96,14 @@ func setIndex(rec *Record, arr []*Record) []*Record {
}
arr = append(arr, rec)
rec.ID = len(arr)
for _, child := range rec.Children {
arr = setIndex(child.Record, arr)
for _, links := range rec.Children {
recs := slices.Collect(maps.Keys(links))
slices.SortFunc(recs, func(i, j *Record) int {
return cmp.Compare(i.Digest, j.Digest)
})
for _, child := range recs {
arr = setIndex(child, arr)
}
}
return arr
}
Expand Down Expand Up @@ -122,23 +131,23 @@ func loadRecord(ctx context.Context, store storeWithLinks, id string, out map[st
if err != nil {
return errors.Wrapf(err, "failed to load link %s for %s", linkID, id)
}
rec.Children = append(rec.Children, Link{
child.Parents = append(child.Parents, Link{
Input: int(link.Input),
Output: int(link.Output),
Selector: link.Selector,
Record: child,
Record: rec,
Digest: link.Digest,
})

if child.Parents == nil {
child.Parents = make(map[int]map[*Record]struct{})
if rec.Children == nil {
rec.Children = make(map[int]map[*Record]struct{})
}
m, ok := child.Parents[int(link.Input)]
m, ok := rec.Children[int(link.Output)]
if !ok {
m = make(map[*Record]struct{})
child.Parents[int(link.Input)] = m
rec.Children[int(link.Output)] = m
}
m[rec] = struct{}{}
m[child] = struct{}{}
return nil
})
if err != nil {
Expand Down