Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
19 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 26 additions & 15 deletions .github/workflows/build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -390,6 +390,7 @@ jobs:
run: |
./hack/images "${{ needs.release-base.outputs.tag }}" "$REPO_SLUG_TARGET" "${{ needs.release-base.outputs.push }}"
env:
RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }}
TARGET: ${{ matrix.target-stage }}
CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }} type=gha,scope=image${{ matrix.target-stage }}
CACHE_TO: type=gha,scope=image${{ matrix.target-stage }}
Expand Down Expand Up @@ -419,6 +420,7 @@ jobs:
run: |
./hack/release-tar "${{ needs.release-base.outputs.tag }}" release-out
env:
RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }}
PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
CACHE_FROM: type=gha,scope=${{ env.CACHE_GHA_SCOPE_BINARIES }} type=gha,scope=${{ env.CACHE_GHA_SCOPE_CROSS }}
-
Expand All @@ -444,8 +446,8 @@ jobs:
if: github.event_name != 'schedule'
outputs:
typ: ${{ steps.prep.outputs.typ }}
tag: ${{ steps.prep.outputs.tag }}
push: ${{ steps.prep.outputs.push }}
matrix: ${{ steps.prep.outputs.matrix }}
steps:
-
name: Prepare
Expand All @@ -462,14 +464,30 @@ jobs:
PUSH=push
fi
echo "typ=${TYP}" >>${GITHUB_OUTPUT}
echo "tag=${TAG}" >>${GITHUB_OUTPUT}
echo "push=${PUSH}" >>${GITHUB_OUTPUT}
if [ "${TYP}" = "master" ]; then
echo "matrix=$(jq -cn --arg tag "$TAG" '[$tag, "labs"]')" >>${GITHUB_OUTPUT}
else
echo "matrix=$(jq -cn --arg tag "$TAG" '[$tag]')" >>${GITHUB_OUTPUT}
fi

frontend-image:
runs-on: ubuntu-20.04
if: github.event_name != 'schedule'
needs: [frontend-base, test]
strategy:
fail-fast: false
matrix:
tag: ${{ fromJson(needs.frontend-base.outputs.matrix) }}
steps:
-
name: Prepare
run: |
if [ "${{ matrix.tag }}" = "labs" ]; then
echo "CACHE_SCOPE=frontend-labs" >>${GITHUB_ENV}
else
echo "CACHE_SCOPE=frontend-mainline" >>${GITHUB_ENV}
fi
-
name: Checkout
uses: actions/checkout@v3
Expand All @@ -494,18 +512,11 @@ jobs:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Build ${{ needs.frontend-base.outputs.typ }}/${{ needs.frontend-base.outputs.tag }}
run: |
./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" "${{ needs.frontend-base.outputs.tag }}" "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}"
env:
PLATFORMS: ${{ env.PLATFORMS }},linux/mips,linux/mipsle,linux/mips64,linux/mips64le
CACHE_FROM: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }}
CACHE_TO: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }}
-
name: Build ${{ needs.frontend-base.outputs.typ }}/labs
if: needs.frontend-base.outputs.typ == 'master'
name: Build
run: |
./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" labs "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}"
./frontend/dockerfile/cmd/dockerfile-frontend/hack/release "${{ needs.frontend-base.outputs.typ }}" "${{ matrix.tag }}" "$DF_REPO_SLUG_TARGET" "${{ needs.frontend-base.outputs.push }}"
env:
PLATFORMS: ${{ env.PLATFORMS }},linux/mips,linux/mipsle,linux/mips64,linux/mips64le
CACHE_FROM: type=gha,scope=frontend-${{ needs.frontend-base.outputs.typ }}
RELEASE: ${{ startsWith(github.ref, 'refs/tags/v') }}
PLATFORMS: ${{ env.PLATFORMS }},linux/386,linux/mips,linux/mipsle,linux/mips64,linux/mips64le
CACHE_FROM: type=gha,scope=${{ env.CACHE_SCOPE }}
CACHE_TO: type=gha,scope=${{ env.CACHE_SCOPE }}
4 changes: 1 addition & 3 deletions cache/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -222,10 +222,8 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,

id := identity.NewID()
snapshotID := chainID.String()
blobOnly := true
if link != nil {
snapshotID = link.getSnapshotID()
blobOnly = link.getBlobOnly()
go link.Release(context.TODO())
}

Expand Down Expand Up @@ -289,7 +287,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor,
rec.queueChainID(chainID)
rec.queueBlobChainID(blobChainID)
rec.queueSnapshotID(snapshotID)
rec.queueBlobOnly(blobOnly)
rec.queueBlobOnly(true)
rec.queueMediaType(desc.MediaType)
rec.queueBlobSize(desc.Size)
rec.appendURLs(desc.URLs)
Expand Down
8 changes: 7 additions & 1 deletion cache/metadata.go
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,13 @@ func (md *cacheMetadata) queueMediaType(str string) error {
}

// getSnapshotID returns the snapshot ID stored on this record. Historic
// buildkit releases did not always persist the snapshot key, so an empty
// value falls back to the record ID to stay compatible with old build cache.
func (md *cacheMetadata) getSnapshotID() string {
	if sid := md.GetString(keySnapshot); sid != "" {
		return sid
	}
	return md.ID()
}

func (md *cacheMetadata) queueSnapshotID(str string) error {
Expand Down
8 changes: 8 additions & 0 deletions cache/remotecache/gha/gha.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"io"
"os"
"sync"
"time"
Expand Down Expand Up @@ -371,6 +372,13 @@ type readerAt struct {
desc ocispecs.Descriptor
}

// ReadAt delegates to the wrapped reader for in-range offsets; any offset at
// or beyond the descriptor's declared size reads no bytes and reports io.EOF.
func (r *readerAt) ReadAt(p []byte, off int64) (int, error) {
	if off < r.desc.Size {
		return r.ReaderAtCloser.ReadAt(p, off)
	}
	return 0, io.EOF
}

// Size reports the blob size as declared in the content descriptor.
func (r *readerAt) Size() int64 {
	return r.desc.Size
}
168 changes: 166 additions & 2 deletions client/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,7 @@ import (
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
spdx "github.com/spdx/tools-golang/spdx/v2_3"
"github.com/stretchr/testify/require"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/sync/errgroup"
Expand Down Expand Up @@ -189,6 +190,7 @@ func TestIntegration(t *testing.T) {
testAttestationBundle,
testSBOMScan,
testSBOMScanSingleRef,
testSBOMSupplements,
testMultipleCacheExports,
testMountStubsDirectory,
testMountStubsTimestamp,
Expand Down Expand Up @@ -1137,9 +1139,9 @@ func testSecretMounts(t *testing.T, sb integration.Sandbox) {
}, nil)
require.NoError(t, err)

// test optional
// test optional, mount should not exist when secret not present in SolveOpt
st = llb.Image("busybox:latest").
Run(llb.Shlex(`echo secret2`), llb.AddSecret("/run/secrets/mysecret2", llb.SecretOptional))
Run(llb.Shlex(`test ! -f /run/secrets/mysecret2`), llb.AddSecret("/run/secrets/mysecret2", llb.SecretOptional))

def, err = st.Marshal(sb.Context())
require.NoError(t, err)
Expand Down Expand Up @@ -1176,6 +1178,20 @@ func testSecretMounts(t *testing.T, sb integration.Sandbox) {
})},
}, nil)
require.NoError(t, err)

// test empty secret still creates secret file
st = llb.Image("busybox:latest").
Run(llb.Shlex(`test -f /run/secrets/mysecret5`), llb.AddSecret("/run/secrets/mysecret5", llb.SecretID("mysecret")))

def, err = st.Marshal(sb.Context())
require.NoError(t, err)

_, err = c.Solve(sb.Context(), def, SolveOpt{
Session: []session.Attachable{secretsprovider.FromMap(map[string][]byte{
"mysecret": []byte(""),
})},
}, nil)
require.NoError(t, err)
}

func testSecretEnv(t *testing.T, sb integration.Sandbox) {
Expand Down Expand Up @@ -8185,6 +8201,154 @@ EOF
require.Subset(t, attest.Predicate, map[string]interface{}{"name": "fallback"})
}

// testSBOMSupplements verifies that an SPDX SBOM attestation produced by a
// frontend is supplemented at export time: files in the SBOM that exist in
// the image get a "layerID: sha256:..." FileComment, while files that do not
// exist are left without one.
func testSBOMSupplements(t *testing.T, sb integration.Sandbox) {
	integration.CheckFeatureCompat(t, sb, integration.FeatureDirectPush, integration.FeatureSBOM)
	requiresLinux(t)
	c, err := New(sb.Context(), sb.Address())
	require.NoError(t, err)

	registry, err := sb.NewRegistry()
	if errors.Is(err, integration.ErrRequirements) {
		t.Skip(err.Error())
	}

	p := platforms.MustParse("linux/amd64")
	pk := platforms.Format(p)

	// Gateway frontend that builds a one-file image and attaches a
	// hand-written SPDX attestation referencing both an existing and a
	// non-existing file.
	frontend := func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
		res := gateway.NewResult()

		// build image: scratch with a single empty /foo file
		st := llb.Scratch().File(
			llb.Mkfile("/foo", 0600, []byte{}),
		)
		def, err := st.Marshal(ctx)
		if err != nil {
			return nil, err
		}
		r, err := c.Solve(ctx, gateway.SolveRequest{
			Definition: def.ToPB(),
		})
		if err != nil {
			return nil, err
		}
		ref, err := r.SingleRef()
		if err != nil {
			return nil, err
		}
		_, err = ref.ToState()
		if err != nil {
			return nil, err
		}
		res.AddRef(pk, ref)

		expPlatforms := &exptypes.Platforms{
			Platforms: []exptypes.Platform{{ID: pk, Platform: p}},
		}
		dt, err := json.Marshal(expPlatforms)
		if err != nil {
			return nil, err
		}
		res.AddMeta(exptypes.ExporterPlatformsKey, dt)

		// build attestations: minimal SPDX document listing /foo and /bar
		doc := spdx.Document{
			SPDXIdentifier: "DOCUMENT",
			Files: []*spdx.File{
				{
					// foo exists...
					FileSPDXIdentifier: "SPDXRef-File-foo",
					FileName:           "/foo",
				},
				{
					// ...but bar doesn't
					FileSPDXIdentifier: "SPDXRef-File-bar",
					FileName:           "/bar",
				},
			},
		}
		docBytes, err := json.Marshal(doc)
		if err != nil {
			return nil, err
		}
		// Ship the SPDX document as /result.spdx in a scratch ref.
		st = llb.Scratch().
			File(llb.Mkfile("/result.spdx", 0600, docBytes))
		def, err = st.Marshal(ctx)
		if err != nil {
			return nil, err
		}
		r, err = c.Solve(ctx, gateway.SolveRequest{
			Definition: def.ToPB(),
		})
		if err != nil {
			return nil, err
		}
		refAttest, err := r.SingleRef()
		if err != nil {
			return nil, err
		}
		_, err = ref.ToState()
		if err != nil {
			return nil, err
		}

		// Attach the SPDX file as an in-toto attestation; the "result"
		// core name marks it as the SBOM for the main image result.
		res.AddAttestation(pk, gateway.Attestation{
			Kind: gatewaypb.AttestationKindInToto,
			Ref:  refAttest,
			Path: "/result.spdx",
			InToto: result.InTotoAttestation{
				PredicateType: intoto.PredicateSPDX,
			},
			Metadata: map[string][]byte{
				result.AttestationSBOMCore: []byte("result"),
			},
		})

		return res, nil
	}

	// test the default fallback scanner
	target := registry + "/buildkit/testsbom:latest"
	_, err = c.Build(sb.Context(), SolveOpt{
		FrontendAttrs: map[string]string{
			"attest:sbom": "",
		},
		Exports: []ExportEntry{
			{
				Type: ExporterImage,
				Attrs: map[string]string{
					"name": target,
					"push": "true",
				},
			},
		},
	}, "", frontend, nil)
	require.NoError(t, err)

	// Pull the pushed result back from the registry for inspection.
	desc, provider, err := contentutil.ProviderFromRef(target)
	require.NoError(t, err)

	imgs, err := testutil.ReadImages(sb.Context(), provider, desc)
	require.NoError(t, err)
	// One image plus its attestation manifest.
	require.Equal(t, 2, len(imgs.Images))

	// Attestation manifests use the "unknown/unknown" platform.
	att := imgs.Find("unknown/unknown")
	attest := struct {
		intoto.StatementHeader
		Predicate spdx.Document
	}{}
	require.NoError(t, json.Unmarshal(att.LayersRaw[0], &attest))
	require.Equal(t, "https://in-toto.io/Statement/v0.1", attest.Type)
	require.Equal(t, intoto.PredicateSPDX, attest.PredicateType)

	require.Equal(t, "DOCUMENT", string(attest.Predicate.SPDXIdentifier))
	require.Len(t, attest.Predicate.Files, 2)
	// /foo is present in the image, so it must be supplemented with the
	// layer digest it was created in; /bar is absent, so no comment.
	require.Equal(t, attest.Predicate.Files[0].FileName, "/foo")
	require.Regexp(t, "^layerID: sha256:", attest.Predicate.Files[0].FileComment)
	require.Equal(t, attest.Predicate.Files[1].FileName, "/bar")
	require.Empty(t, attest.Predicate.Files[1].FileComment)
}

func testMultipleCacheExports(t *testing.T, sb integration.Sandbox) {
integration.CheckFeatureCompat(t, sb, integration.FeatureMultiCacheExport)
c, err := New(sb.Context(), sb.Address())
Expand Down
12 changes: 10 additions & 2 deletions exporter/containerimage/attestations.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"context"
"fmt"
"io/fs"
"path/filepath"
"strings"

intoto "github.com/in-toto/in-toto-golang/in_toto"
Expand All @@ -30,6 +31,9 @@ var intotoPlatform ocispecs.Platform = ocispecs.Platform{

// supplementSBOM modifies SPDX attestations to include the file layers
func supplementSBOM(ctx context.Context, s session.Group, target cache.ImmutableRef, targetRemote *solver.Remote, att exporter.Attestation) (exporter.Attestation, error) {
if target == nil {
return att, nil
}
if att.Kind != gatewaypb.AttestationKindInToto {
return att, nil
}
Expand All @@ -40,7 +44,7 @@ func supplementSBOM(ctx context.Context, s session.Group, target cache.Immutable
if !ok {
return att, nil
}
if n, _, _ := strings.Cut(att.Path, "."); n != string(name) {
if n, _, _ := strings.Cut(filepath.Base(att.Path), "."); n != string(name) {
return att, nil
}

Expand Down Expand Up @@ -172,6 +176,8 @@ func newFileLayerFinder(target cache.ImmutableRef, remote *solver.Remote) (fileL
//
// find is not concurrency-safe.
func (c *fileLayerFinder) find(ctx context.Context, s session.Group, filename string) (cache.ImmutableRef, *ocispecs.Descriptor, error) {
filename = filepath.Join("/", filename)

// return immediately if we've already found the layer containing filename
if cache, ok := c.cache[filename]; ok {
return cache.ref, &cache.desc, nil
Expand All @@ -188,7 +194,9 @@ func (c *fileLayerFinder) find(ctx context.Context, s session.Group, filename st

found := false
for _, f := range files {
if strings.HasPrefix(f, ".wh.") {
f = filepath.Join("/", f)

if strings.HasPrefix(filepath.Base(f), ".wh.") {
// skip whiteout files, we only care about file creations
continue
}
Expand Down
2 changes: 1 addition & 1 deletion exporter/util/epoch/parse.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,6 @@ func parseTime(key, value string) (*time.Time, error) {
if err != nil {
return nil, errors.Wrapf(err, "invalid %s: %s", key, err)
}
tm := time.Unix(sde, 0)
tm := time.Unix(sde, 0).UTC()
return &tm, nil
}
Loading