diff --git a/.github/workflows/kind-e2e.yaml b/.github/workflows/kind-e2e.yaml index 60bf099f4a..67f5202fe1 100644 --- a/.github/workflows/kind-e2e.yaml +++ b/.github/workflows/kind-e2e.yaml @@ -32,7 +32,6 @@ jobs: fail-fast: false # Keep running if one leg fails. matrix: pipelines-release: - - v0.41.3 # LTS - v0.44.4 # LTS - v0.47.3 # LTS - v0.50.1 # LTS diff --git a/examples/kaniko/gcp/kaniko.yaml b/examples/kaniko/gcp/kaniko.yaml index 6e62432557..3e4486ee3d 100644 --- a/examples/kaniko/gcp/kaniko.yaml +++ b/examples/kaniko/gcp/kaniko.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: Task metadata: name: kaniko-gcp diff --git a/examples/kaniko/gcp/taskrun.yaml b/examples/kaniko/gcp/taskrun.yaml index de8e0e6393..4017995af6 100644 --- a/examples/kaniko/gcp/taskrun.yaml +++ b/examples/kaniko/gcp/taskrun.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: TaskRun metadata: name: kaniko-gcp diff --git a/examples/kaniko/kaniko.yaml b/examples/kaniko/kaniko.yaml index ad5dc752ef..2606deb58a 100644 --- a/examples/kaniko/kaniko.yaml +++ b/examples/kaniko/kaniko.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: Task metadata: name: kaniko-chains diff --git a/examples/kaniko/taskrun.yaml b/examples/kaniko/taskrun.yaml index d2785d36f0..cfc8cbc791 100644 --- a/examples/kaniko/taskrun.yaml +++ b/examples/kaniko/taskrun.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: TaskRun metadata: name: kaniko-run diff --git a/examples/pipelineruns/pipeline-output-image.yaml b/examples/pipelineruns/pipeline-output-image.yaml index a1d8761adb..b0e2efe573 100644 --- a/examples/pipelineruns/pipeline-output-image.yaml +++ b/examples/pipelineruns/pipeline-output-image.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: PipelineRun metadata: generateName: image-pipelinerun diff --git a/examples/releases/v0.3.0-build-chains-taskrun.yaml b/examples/releases/v0.3.0-build-chains-taskrun.yaml index f80076094b..db9b95bc9a 100644 --- a/examples/releases/v0.3.0-build-chains-taskrun.yaml +++ b/examples/releases/v0.3.0-build-chains-taskrun.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: TaskRun metadata: generateName: build-chains- diff --git a/examples/taskruns/task-output-image.yaml b/examples/taskruns/task-output-image.yaml index 8e780970de..5b1e6c1aea 100644 --- a/examples/taskruns/task-output-image.yaml +++ b/examples/taskruns/task-output-image.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: TaskRun metadata: name: build-push-run-output-image-test diff --git a/go.mod b/go.mod index ad72ab5a1b..ea1641042c 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/tektoncd/chains -go 1.20 +go 1.21 require ( cloud.google.com/go/compute/metadata v0.2.3 @@ -15,6 +15,7 @@ require ( github.com/hashicorp/go-multierror v1.1.1 github.com/in-toto/in-toto-golang v0.9.0 github.com/opencontainers/go-digest v1.0.0 + github.com/opentracing/opentracing-go v1.2.0 github.com/pkg/errors v0.9.1 github.com/secure-systems-lab/go-securesystemslib v0.8.0 github.com/sigstore/cosign/v2 v2.2.2 @@ -26,7 +27,7 @@ require ( github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.0 github.com/spiffe/go-spiffe/v2 v2.1.6 github.com/stretchr/testify v1.8.4 - github.com/tektoncd/pipeline v0.55.0 + github.com/tektoncd/pipeline v0.55.1-0.20240105143253-fe47c9bc893a github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1 go.uber.org/zap v1.26.0 gocloud.dev v0.36.0 @@ -36,7 +37,7 @@ require ( google.golang.org/grpc v1.60.1 google.golang.org/protobuf v1.32.0 k8s.io/api v0.28.3 - k8s.io/apimachinery v0.28.3 + k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.28.3 k8s.io/code-generator v0.26.5 knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626 @@ -315,7 +316,6 @@ require ( github.com/oleiade/reflections v1.0.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/image-spec v1.1.0-rc5 // indirect - github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/openzipkin/zipkin-go v0.3.0 // indirect github.com/pborman/uuid v1.2.1 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect @@ -399,7 +399,7 @@ require ( github.com/zeebo/errs v1.3.0 // indirect gitlab.com/bosi/decorder v0.4.1 // indirect go-simpler.org/sloglint v0.1.2 // indirect - go.mongodb.org/mongo-driver v1.13.0 // indirect + go.mongodb.org/mongo-driver v1.13.1 // indirect go.opencensus.io v0.24.0 
// indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect @@ -441,7 +441,7 @@ require ( honnef.co/go/tools v0.4.6 // indirect k8s.io/apiextensions-apiserver v0.26.5 // indirect k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect - k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/klog/v2 v2.110.1 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect mvdan.cc/gofumpt v0.5.0 // indirect @@ -450,7 +450,7 @@ require ( mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/release-utils v0.7.7 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) // Replace glog to fix flag collision between klog and glog. diff --git a/go.sum b/go.sum index 8744e2381b..e0a4b33d32 100644 --- a/go.sum +++ b/go.sum @@ -71,6 +71,7 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -81,11 +82,13 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= 
cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= code.gitea.io/sdk/gitea v0.16.0 h1:gAfssETO1Hv9QbE+/nhWu7EjoFQYKt6kPoyDytQgw00= +code.gitea.io/sdk/gitea v0.16.0/go.mod h1:ndkDk99BnfiUCCYEUhpNzi0lpmApXlwRFqClBlOlEBg= contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI= contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY= contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= @@ -94,6 +97,7 @@ github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQ github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo= github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y= github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18 h1:rd389Q26LMy03gG4anandGFC2LW/xvjga5GezeeaxQk= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230618160516-e936619f9f18/go.mod h1:fgJuSBrJP5qZtKqaMJE0hmhS2tmRH+44IkfZvjtaf1M= github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw= github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs= 
github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= @@ -155,6 +159,7 @@ github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Microsoft/hcsshim v0.11.1 h1:hJ3s7GbWlGK4YVV92sO88BQSyF4ZLVy7/awqOlPxFbA= +github.com/Microsoft/hcsshim v0.11.1/go.mod h1:nFJmaO4Zr5Y7eADdFOpYswDDlNVbvcIJJNJLECr5JQg= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY= github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ= @@ -165,18 +170,22 @@ github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/ github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= +github.com/acomagu/bufpipe v1.0.4/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= +github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/go-check-sumtype v0.1.3 h1:M+tqMxB68hcgccRXBMVCPI4UJ+QUfdSx0xdbypKCqA8= github.com/alecthomas/go-check-sumtype v0.1.3/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod 
h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE= github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= @@ -227,6 +236,7 @@ github.com/aliyun/credentials-go v1.3.1 h1:uq/0v7kWrxmoLGpqjx7vtQ/s03f0zR//0br/x github.com/aliyun/credentials-go v1.3.1/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= @@ -296,6 
+306,7 @@ github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= github.com/bluekeyes/go-gitdiff v0.7.1 h1:graP4ElLRshr8ecu0UtqfNTCHrtSyZd3DABQm/DWesQ= +github.com/bluekeyes/go-gitdiff v0.7.1/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM= github.com/bmatcuk/doublestar/v4 v4.0.2 h1:X0krlUVAVmtr2cRoTqR8aDMrDqnB36ht8wpWTiQ3jsA= github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= @@ -357,11 +368,15 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20231016030527-8bd2eac9fb4a h1:SZL0tarhuhoN0kvo5pfO4i6vxYghwzXUo9w0WHIjI4k= +github.com/cncf/xds/go v0.0.0-20231016030527-8bd2eac9fb4a/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= +github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= github.com/containerd/containerd v1.7.8 h1:RkwgOW3AVUT3H/dyT0W03Dc8AzlpMG65lX48KftOFSM= +github.com/containerd/containerd v1.7.8/go.mod h1:L/Hn9qylJtUFT7cPeM0Sr3fATj+WjHwRQ0lyrYk3OPY= github.com/containerd/log v0.1.0 
h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k= github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o= github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo= @@ -376,17 +391,21 @@ github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT8 github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y= github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= +github.com/danieljoos/wincred v1.2.0/go.mod h1:FzQLLMKBFdvu+osBrnFODiv32YGwCfx0SkRa/eYHgec= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidmz/go-pageant v1.0.2 h1:bPblRCh5jGU+Uptpz6LgMZGD5hJoOt7otgT454WvHn0= +github.com/davidmz/go-pageant v1.0.2/go.mod h1:P2EDDnMqIwG5Rrp05dTRITj9z2zpGcD9efWSkTNKLIE= github.com/denis-tingaikin/go-header v0.4.3 
h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936 h1:foGzavPWwtoyBvjWyKJYDYsyzy+23iBV7NKTwdk+LRY= +github.com/depcheck-test/depcheck-test v0.0.0-20220607135614-199033aaa936/go.mod h1:ttKPnOepYt4LLzD+loXQ1rT6EmpyIYHro7TAJuIIlHo= github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= @@ -395,6 +414,7 @@ github.com/digitorus/timestamp v0.0.0-20230902153158-687734543647/go.mod h1:GvWn github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg= github.com/docker/cli v24.0.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= @@ -427,6 +447,7 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. 
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= @@ -449,6 +470,7 @@ github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= @@ -461,14 +483,19 @@ github.com/ghostiam/protogetter v0.2.3 h1:qdv2pzo3BpLqezwqfGDLZ+nHEYmc5bUpIdsMbB github.com/ghostiam/protogetter v0.2.3/go.mod h1:KmNLOsy1v04hKbvZs8EfGI1fk39AgTdRDxWNYPfXVc4= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY= +github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= github.com/go-chi/chi 
v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U= github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40= github.com/go-fed/httpsig v1.1.0 h1:9M+hb0jkEICD8/cAiNqEB66R87tTINszBRTjwjQzWcI= +github.com/go-fed/httpsig v1.1.0/go.mod h1:RCMrTZvN1bJYtofsG4rd5NaO5obxQ5xBkdiS7xsT7bM= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= github.com/go-git/go-git/v5 v5.10.0 h1:F0x3xXrAWmhwtzoCokU4IMPcBdncG+HAAqi9FcOOjbQ= +github.com/go-git/go-git/v5 v5.10.0/go.mod h1:1FOZ/pQnqw24ghP2n7cunVl0ON55BsjPYvhWHvZGhoo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -523,10 +550,13 @@ github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+ github.com/go-openapi/validate v0.22.3 h1:KxG9mu5HBRYbecRb37KRCihvGGtND2aXziBAv0NNfyI= github.com/go-openapi/validate v0.22.3/go.mod h1:kVxh31KbfsxU8ZyoHaDbLBWU5CnMdqBUEtadQ2G4d5M= github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c= +github.com/go-rod/rod v0.114.5/go.mod h1:aiedSEFg5DwG/fnNbUOTPMTTWX3MRj6vIs/a684Mthw= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod 
h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= @@ -539,6 +569,7 @@ github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlN github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= @@ -549,7 +580,9 @@ github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6C github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/kpoward v0.1.0 h1:UcrLMG9rq7NwrMiUc0h+qUyIlvqPzqLiPb+zQEqH8cE= +github.com/goccy/kpoward v0.1.0/go.mod h1:m13lkcWSvNXtYC9yrXzguwrt/YTDAGioPusndMdQ+eA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 
v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -658,8 +691,10 @@ github.com/google/go-licenses v1.6.0/go.mod h1:Z8jgz2isEhdenOqd/00pq7I4y4k1xVVQJ github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/go-replayers/grpcreplay v1.1.0 h1:S5+I3zYyZ+GQz68OfbURDdt/+cSMqCK1wrvNx7WBzTE= +github.com/google/go-replayers/grpcreplay v1.1.0/go.mod h1:qzAvJ8/wi57zq7gWqaE6AwLM6miiXUQwP1S+I9icmhk= github.com/google/go-replayers/httpreplay v1.1.1/go.mod h1:gN9GeLIs7l6NUoVaSSnv2RiqK1NiwAmD0MrKeC9IIks= github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= +github.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -691,12 +726,15 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= +github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= github.com/google/trillian v1.5.3 h1:3ioA5p09qz+U9/t2riklZtaQdZclaStp0/eQNfewNRg= +github.com/google/trillian v1.5.3/go.mod h1:p4tcg7eBr7aT6DxrAoILpc3uXNfcuAvZSnQKonVg+Eo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -738,6 +776,7 @@ github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3 github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/grafeas/grafeas v0.2.3 h1:B9Bgc3ZQjPhqXKmro95Dfyb+GlE6D1pMuExT+n66ChE= github.com/grafeas/grafeas v0.2.3/go.mod h1:O+UvNYn4LhdKR59XrxRDWwr2bbheR1KRRNdD8mJpxs4= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= @@ -751,6 +790,7 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= 
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -783,6 +823,7 @@ github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5 github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -816,6 +857,7 @@ github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1n github.com/jellydator/ttlcache/v3 v3.1.1 h1:RCgYJqo3jgvhl+fEWvjNW8thxGWsgxi+TPhRir1Y9y8= github.com/jellydator/ttlcache/v3 v3.1.1/go.mod h1:hi7MGFdMAwZna5n2tuvh63DvFLzVKySzCVW6+0gA2n4= github.com/jenkins-x/go-scm v1.14.20 h1:9AvGdFh9a87WYDbIQaecnylsIuowQM8nbEfDuYlFJYY= +github.com/jenkins-x/go-scm v1.14.20/go.mod h1:4EvjlLLeR4ilT413ZYXhop4Kxji4dYOJI3dlliKFut8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA= github.com/jgautheron/goconst v1.6.0/go.mod 
h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= @@ -828,6 +870,7 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -866,6 +909,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -928,6 +972,7 @@ github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= @@ -936,7 +981,9 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -982,12 +1029,14 @@ github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.17.0/go.mod 
h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= +github.com/onsi/gomega v1.28.1/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= @@ -999,6 +1048,7 @@ github.com/openzipkin/zipkin-go v0.3.0/go.mod h1:4c3sLeE8xjNqehmF5RpAFLPLJxXscc0 github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/copy v1.6.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E= github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc= +github.com/otiai10/copy v1.11.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= @@ -1013,6 +1063,7 @@ github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors 
v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1027,6 +1078,7 @@ github.com/polyfloyd/go-errorlint v1.4.5 h1:70YWmMy4FgRHehGNOUask3HtSFSOLKgmDn7r github.com/polyfloyd/go-errorlint v1.4.5/go.mod h1:sIZEbFoDOCnTYYZoVkjc4hTnM459tuWA9H/EkdXwsKk= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -1073,6 +1125,7 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= @@ -1095,6 +1148,7 @@ github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSC github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= github.com/sassoftware/relic/v7 v7.6.1 h1:O5s8ewCgq5QYNpv45dK4u6IpBmDM9RIcsbf/G1uXepQ= +github.com/sassoftware/relic/v7 
v7.6.1/go.mod h1:NxwtWxWxlUa9as2qZi635Ye6bBT/tGnMALLq7dSfOOU= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/securego/gosec/v2 v2.18.2 h1:DkDt3wCiOtAHf1XkiXZBhQ6m6mK/b9T/wD257R3/c+I= @@ -1110,9 +1164,11 @@ github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAx github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260 h1:xKXiRdBUtMVp64NaxACcyX4kvfmHJ9KrLU+JvyB1mdM= +github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f h1:tygelZueB1EtXkPI6mQ4o9DQ0+FKW41hTbunoXZCTqk= +github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sigstore/cosign/v2 v2.2.2 h1:V1uE1/QnKGfj77vuqlEGBg6O2ZJqOrWkLwjTC21Vxw0= github.com/sigstore/cosign/v2 v2.2.2/go.mod h1:bNmX0qyby7sgcqY9oY/jy5m+XJ3N3LtpOsNfO+A1CGo= @@ -1146,9 +1202,11 @@ github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvR github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= 
+github.com/skeema/knownhosts v1.2.0/go.mod h1:g4fPeYpque7P0xefxtGzV81ihjC8sX2IqpAoNkjxbMo= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= +github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -1173,6 +1231,7 @@ github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0 github.com/spiffe/go-spiffe/v2 v2.1.6 h1:4SdizuQieFyL9eNU+SPiCArH4kynzaKOOj0VvM8R7Xo= github.com/spiffe/go-spiffe/v2 v2.1.6/go.mod h1:eVDqm9xFvyqao6C+eQensb9ZPkyNEeaUbqbBpOhBnNk= github.com/spiffe/spire-api-sdk v1.8.5 h1:DjYWO2muHvhwOBOTz/0zTGiBwJkofX/1V9mUAI+P4tU= +github.com/spiffe/spire-api-sdk v1.8.5/go.mod h1:4uuhFlN6KBWjACRP3xXwrOTNnvaLp1zJs8Lribtr4fI= github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= @@ -1211,6 +1270,8 @@ github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9 github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tektoncd/pipeline v0.55.0 h1:RUfqSC/J1dMrdfu1ThJreHojwGXcWc8P131el/c+c1c= github.com/tektoncd/pipeline v0.55.0/go.mod h1:fFbFAhyNwsPQpitrwhi+Wp0Xse2EkIE1LtGKC08rVqo= +github.com/tektoncd/pipeline 
v0.55.1-0.20240105143253-fe47c9bc893a h1:V5ORq8a77oo9Z8DBN13Wy5uSyfa7lQeqQxhpCtI1CQk= +github.com/tektoncd/pipeline v0.55.1-0.20240105143253-fe47c9bc893a/go.mod h1:umFJQ9olExOs+BIOitYWsVtOJaKaBe0o2f/5AIB+Cxc= github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1 h1:9paprRIBXQgcvdhGq3wKiSspXP0JIFSY52ru3sIMjKM= github.com/tektoncd/plumbing v0.0.0-20230907180608-5625252a2de1/go.mod h1:7eWs1XNkmReggow7ggRbRyRuHi7646B8b2XipCZ3VOw= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= @@ -1247,6 +1308,7 @@ github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/X github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= github.com/xanzy/go-gitlab v0.94.0 h1:GmBl2T5zqUHqyjkxFSvsT7CbelGdAH/dmBqUBqS+4BE= @@ -1276,10 +1338,15 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a h1:fZHgsYlfvtyqToslyjUt3VOPF4J7aK/3MPcK7xp3PDk= github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a/go.mod h1:ul22v+Nro/R083muKhosV54bj5niojjWZvU8xrevuH4= github.com/ysmood/fetchup v0.2.3 h1:ulX+SonA0Vma5zUFXtv52Kzip/xe7aj4vqT5AJwQ+ZQ= +github.com/ysmood/fetchup v0.2.3/go.mod h1:xhibcRKziSvol0H1/pj33dnKrYyI2ebIvz5cOOkYGns= github.com/ysmood/goob v0.4.0 h1:HsxXhyLBeGzWXnqVKtmT9qM7EuVs/XOgkX7T6r1o1AQ= +github.com/ysmood/goob v0.4.0/go.mod h1:u6yx7ZhS4Exf2MwciFr6nIM8knHQIE22lFpWHnfql18= github.com/ysmood/got v0.34.1 h1:IrV2uWLs45VXNvZqhJ6g2nIhY+pgIG1CUoOcqfXFl1s= 
+github.com/ysmood/got v0.34.1/go.mod h1:yddyjq/PmAf08RMLSwDjPyCvHvYed+WjHnQxpH851LM= github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE= +github.com/ysmood/gson v0.7.3/go.mod h1:3Kzs5zDl21g5F/BlLTNcuAGAYLKt2lV5G8D1zF3RNmg= github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak= +github.com/ysmood/leakless v0.8.0/go.mod h1:R8iAXPRaG97QJwqxs74RdwzcRHT1SWCGTNqY8q0JvMQ= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.30/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1289,16 +1356,20 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/zalando/go-keyring v0.2.2 h1:f0xmpYiSrHtSNAVgwip93Cg8tuF45HJM6rHq/A5RI/4= +github.com/zalando/go-keyring v0.2.2/go.mod h1:sI3evg9Wvpw3+n4SqplGSJUMwtDeROfD4nsFz4z9PG0= github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E= +go-simpler.org/assert v0.6.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= go-simpler.org/sloglint v0.1.2 h1:IjdhF8NPxyn0Ckn2+fuIof7ntSnVUAqBFcQRrnG9AiM= go-simpler.org/sloglint v0.1.2/go.mod h1:2LL+QImPfTslD5muNPydAEYmpXIj6o/WYcqnJjLi4o4= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= go.mongodb.org/mongo-driver v1.13.0 h1:67DgFFjYOCMWdtTEmKFpV3ffWlFnh+CYZ8ZS/tXWUfY= 
go.mongodb.org/mongo-driver v1.13.0/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1315,9 +1386,11 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1: go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel/exporters/jaeger v1.17.0 h1:D7UpUy2Xc2wsi1Ras6V40q806WM07rqoCWzXu7Sqy+4= +go.opentelemetry.io/otel/exporters/jaeger v1.17.0/go.mod h1:nPCqOnEH9rNLKqH/+rrUjiMzHJdV1BlpKcTwRTyKkKI= go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= @@ -1331,6 +1404,7 @@ go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0 go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME= go.uber.org/automaxprocs v1.5.2/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod 
h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= @@ -1989,6 +2063,7 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2004,6 +2079,8 @@ k8s.io/apiextensions-apiserver v0.26.5 h1:VJ946z9RjyCPn3qiz4Kus/UYjCRrdn1xUvEsJF k8s.io/apiextensions-apiserver v0.26.5/go.mod h1:Olsde7ZNWnyz9rsL13iXYXmL1h7kWujtKeC3yWVCDPo= k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/apimachinery v0.29.0 h1:+ACVktwyicPz0oc6MTMLwa2Pw3ouLAfAon1wPLtG48o= +k8s.io/apimachinery v0.29.0/go.mod h1:eVBxQ/cwiJxH58eK/jd/vAk4mrxmVlnpBH5J2GbMeis= k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= k8s.io/code-generator v0.26.5 h1:0p350mqxkbs29h8/yF4AMilApLVUhnRx3EAfhTWR5fY= @@ -2014,6 +2091,8 @@ k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.100.1 
h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= @@ -2037,7 +2116,10 @@ sigs.k8s.io/release-utils v0.7.7 h1:JKDOvhCk6zW8ipEOkpTGDH/mW3TI+XqtPp16aaQ79FU= sigs.k8s.io/release-utils v0.7.7/go.mod h1:iU7DGVNi3umZJ8q6aHyUFzsDUIaYwNnNKGHo3YE5E3s= sigs.k8s.io/structured-merge-diff/v4 v4.3.0 h1:UZbZAZfX0wV2zr7YZorDz6GXROfDFj6LvqCRm4VUVKk= sigs.k8s.io/structured-merge-diff/v4 v4.3.0/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= software.sslmate.com/src/go-pkcs12 v0.2.0 h1:nlFkj7bTysH6VkC4fGphtjXRbezREPgrHuJG20hBGPE= +software.sslmate.com/src/go-pkcs12 v0.2.0/go.mod h1:23rNcYsMabIc1otwLpTkCCPwUq6kQsTyowttG/as0kQ= diff --git a/pkg/artifacts/signable.go b/pkg/artifacts/signable.go index 1ac9492f99..78e796a865 100644 --- a/pkg/artifacts/signable.go +++ b/pkg/artifacts/signable.go @@ -24,9 +24,11 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" "github.com/opencontainers/go-digest" + "github.com/opentracing/opentracing-go/log" 
"github.com/tektoncd/chains/internal/backport" "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "k8s.io/apimachinery/pkg/util/sets" "knative.dev/pkg/logging" @@ -65,12 +67,12 @@ type TaskRunArtifact struct{} var _ Signable = &TaskRunArtifact{} func (ta *TaskRunArtifact) ShortKey(obj interface{}) string { - tro := obj.(*objects.TaskRunObject) + tro := obj.(*objects.TaskRunObjectV1) return "taskrun-" + string(tro.UID) } func (ta *TaskRunArtifact) FullKey(obj interface{}) string { - tro := obj.(*objects.TaskRunObject) + tro := obj.(*objects.TaskRunObjectV1) gvk := tro.GetGroupVersionKind() return fmt.Sprintf("%s-%s-%s-%s", gvk.Group, gvk.Version, gvk.Kind, tro.UID) } @@ -104,12 +106,12 @@ type PipelineRunArtifact struct{} var _ Signable = &PipelineRunArtifact{} func (pa *PipelineRunArtifact) ShortKey(obj interface{}) string { - pro := obj.(*objects.PipelineRunObject) + pro := obj.(*objects.PipelineRunObjectV1) return "pipelinerun-" + string(pro.UID) } func (pa *PipelineRunArtifact) FullKey(obj interface{}) string { - pro := obj.(*objects.PipelineRunObject) + pro := obj.(*objects.PipelineRunObjectV1) gvk := pro.GetGroupVersionKind() return fmt.Sprintf("%s-%s-%s-%s", gvk.Group, gvk.Version, gvk.Kind, pro.UID) } @@ -149,40 +151,42 @@ type image struct { } func (oa *OCIArtifact) ExtractObjects(ctx context.Context, obj objects.TektonObject) []interface{} { - log := logging.FromContext(ctx) objs := []interface{}{} // TODO: Not applicable to PipelineRuns, should look into a better way to separate this out - if tr, ok := obj.GetObject().(*v1beta1.TaskRun); ok { - imageResourceNames := map[string]*image{} - if tr.Status.TaskSpec != nil && tr.Status.TaskSpec.Resources != nil { - for _, output := range tr.Status.TaskSpec.Resources.Outputs { - if output.Type == backport.PipelineResourceTypeImage { - 
imageResourceNames[output.Name] = &image{} + if trV1, ok := obj.GetObject().(*v1.TaskRun); ok { + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, trV1); err == nil { + imageResourceNames := map[string]*image{} + if trV1Beta1.Status.TaskSpec != nil && trV1Beta1.Status.TaskSpec.Resources != nil { //nolint:staticcheck + for _, output := range trV1Beta1.Status.TaskSpec.Resources.Outputs { //nolint:staticcheck + if output.Type == backport.PipelineResourceTypeImage { + imageResourceNames[output.Name] = &image{} + } } } - } - - for _, rr := range tr.Status.ResourcesResult { - img, ok := imageResourceNames[rr.ResourceName] - if !ok { - continue - } - // We have a result for an image! - if rr.Key == "url" { - img.url = rr.Value - } else if rr.Key == "digest" { - img.digest = rr.Value + for _, rr := range trV1Beta1.Status.ResourcesResult { + img, ok := imageResourceNames[rr.ResourceName] + if !ok { + continue + } + // We have a result for an image! + if rr.Key == "url" { + img.url = rr.Value + } else if rr.Key == "digest" { + img.digest = rr.Value + } } - } - for _, image := range imageResourceNames { - dgst, err := name.NewDigest(fmt.Sprintf("%s@%s", image.url, image.digest)) - if err != nil { - log.Error(err) - continue + for _, image := range imageResourceNames { + dgst, err := name.NewDigest(fmt.Sprintf("%s@%s", image.url, image.digest)) + if err != nil { + log.Error(err) + continue + } + + objs = append(objs, dgst) } - objs = append(objs, dgst) } } @@ -208,7 +212,6 @@ func ExtractOCIImagesFromResults(ctx context.Context, obj objects.TektonObject) logger.Errorf("error getting digest: %v", err) continue } - objs = append(objs, dgst) } diff --git a/pkg/artifacts/signable_test.go b/pkg/artifacts/signable_test.go index b3181020c3..5cbdab0847 100644 --- a/pkg/artifacts/signable_test.go +++ b/pkg/artifacts/signable_test.go @@ -14,6 +14,7 @@ limitations under the License. 
package artifacts import ( + "encoding/json" "fmt" "sort" "testing" @@ -23,6 +24,7 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" "github.com/tektoncd/chains/pkg/chains/objects" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" logtesting "knative.dev/pkg/logging/testing" @@ -52,7 +54,7 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }{ { name: "one image", - obj: objects.NewTaskRunObject(&v1beta1.TaskRun{ + obj: objects.NewTaskRunObjectV1Beta1(&v1beta1.TaskRun{ //nolint:staticcheck TypeMeta: metav1.TypeMeta{ Kind: "TaskRun", }, @@ -71,10 +73,10 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }, }, TaskSpec: &v1beta1.TaskSpec{ - Resources: &v1beta1.TaskResources{ - Outputs: []v1beta1.TaskResource{ + Resources: &v1beta1.TaskResources{ //nolint:staticcheck + Outputs: []v1beta1.TaskResource{ //nolint:staticcheck { - ResourceDeclaration: v1beta1.ResourceDeclaration{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ //nolint:staticcheck Name: "my-image", Type: "image", }, @@ -89,7 +91,7 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }, { name: "two images", - obj: objects.NewTaskRunObject(&v1beta1.TaskRun{ + obj: objects.NewTaskRunObjectV1Beta1(&v1beta1.TaskRun{ //nolint:staticcheck TypeMeta: metav1.TypeMeta{ Kind: "TaskRun", }, @@ -118,16 +120,16 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }, }, TaskSpec: &v1beta1.TaskSpec{ - Resources: &v1beta1.TaskResources{ - Outputs: []v1beta1.TaskResource{ + Resources: &v1beta1.TaskResources{ //nolint:staticcheck + Outputs: []v1beta1.TaskResource{ //nolint:staticcheck { - ResourceDeclaration: v1beta1.ResourceDeclaration{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ //nolint:staticcheck Name: "my-image1", Type: "image", }, }, { - ResourceDeclaration: v1beta1.ResourceDeclaration{ + ResourceDeclaration: 
v1beta1.ResourceDeclaration{ //nolint:staticcheck Name: "my-image2", Type: "image", }, @@ -145,7 +147,7 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }, { name: "resource and result", - obj: objects.NewTaskRunObject(&v1beta1.TaskRun{ + obj: objects.NewTaskRunObjectV1Beta1(&v1beta1.TaskRun{ //nolint:staticcheck TypeMeta: metav1.TypeMeta{ Kind: "TaskRun", }, @@ -182,10 +184,10 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { Name: "IMAGE_DIGEST", }, }, - Resources: &v1beta1.TaskResources{ - Outputs: []v1beta1.TaskResource{ + Resources: &v1beta1.TaskResources{ //nolint:staticcheck + Outputs: []v1beta1.TaskResource{ //nolint:staticcheck { - ResourceDeclaration: v1beta1.ResourceDeclaration{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ //nolint:staticcheck Name: "my-image", Type: "image", }, @@ -202,7 +204,7 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }, { name: "extra", - obj: objects.NewTaskRunObject(&v1beta1.TaskRun{ + obj: objects.NewTaskRunObjectV1Beta1(&v1beta1.TaskRun{ //nolint:staticcheck TypeMeta: metav1.TypeMeta{ Kind: "TaskRun", }, @@ -241,10 +243,10 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }, }, TaskSpec: &v1beta1.TaskSpec{ - Resources: &v1beta1.TaskResources{ - Outputs: []v1beta1.TaskResource{ + Resources: &v1beta1.TaskResources{ //nolint:staticcheck + Outputs: []v1beta1.TaskResource{ //nolint:staticcheck { - ResourceDeclaration: v1beta1.ResourceDeclaration{ + ResourceDeclaration: v1beta1.ResourceDeclaration{ //nolint:staticcheck Name: "my-image", Type: "image", }, @@ -256,15 +258,16 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }, }), want: []interface{}{createDigest(t, "gcr.io/foo/bar@sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, - }, { + }, + { name: "images", - obj: objects.NewTaskRunObject(&v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskRunResults: []v1beta1.TaskRunResult{ + obj: 
objects.NewTaskRunObjectV1(&v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Results: []v1.TaskRunResult{ { Name: "IMAGES", - Value: *v1beta1.NewStructuredValues(fmt.Sprintf(" \n \tgcr.io/foo/bar@%s\n,gcr.io/baz/bar@%s", digest1, digest2)), + Value: *v1.NewStructuredValues(fmt.Sprintf(" \n \tgcr.io/foo/bar@%s\n,gcr.io/baz/bar@%s", digest1, digest2)), }, }, }, @@ -276,13 +279,13 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { }, }, { name: "images-newline", - obj: objects.NewTaskRunObject(&v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskRunResults: []v1beta1.TaskRunResult{ + obj: objects.NewTaskRunObjectV1(&v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Results: []v1.TaskRunResult{ { Name: "IMAGES", - Value: *v1beta1.NewStructuredValues(fmt.Sprintf("gcr.io/foo/bar@%s\ngcr.io/baz/bar@%s\n\n", digest1, digest2)), + Value: *v1.NewStructuredValues(fmt.Sprintf("gcr.io/foo/bar@%s\ngcr.io/baz/bar@%s\n\n", digest1, digest2)), }, }, }, @@ -298,6 +301,19 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { t.Run(tt.name, func(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) oa := &OCIArtifact{} + if trV1Beta1, ok := tt.obj.GetObject().(*v1beta1.TaskRun); ok { //nolint:staticcheck + trV1 := &v1.TaskRun{} + if err := trV1Beta1.ConvertTo(ctx, trV1); err == nil { + if trV1Beta1.Status.TaskRunStatusFields.TaskSpec != nil && trV1Beta1.Status.TaskRunStatusFields.TaskSpec.Resources != nil { //nolint:staticcheck + jsonData, err := json.Marshal(trV1Beta1.Status.TaskRunStatusFields.TaskSpec.Resources) //nolint:staticcheck + if err != nil { + t.Errorf("Error serializing to JSON: %v", err) + } + trV1.Annotations["tekton.dev/v1beta1-status-taskrunstatusfields-taskspec-resources"] = string(jsonData) + } + tt.obj = objects.NewTaskRunObjectV1(trV1) + } + } got := oa.ExtractObjects(ctx, tt.obj) sort.Slice(got, func(i, j int) bool { a 
:= got[i].(name.Digest) @@ -312,25 +328,25 @@ func TestOCIArtifact_ExtractObjects(t *testing.T) { } func TestExtractOCIImagesFromResults(t *testing.T) { - tr := &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskRunResults: []v1beta1.TaskRunResult{ - {Name: "img1_IMAGE_URL", Value: *v1beta1.NewStructuredValues("img1")}, - {Name: "img1_IMAGE_DIGEST", Value: *v1beta1.NewStructuredValues(digest1)}, - {Name: "img2_IMAGE_URL", Value: *v1beta1.NewStructuredValues("img2")}, - {Name: "img2_IMAGE_DIGEST", Value: *v1beta1.NewStructuredValues(digest2)}, - {Name: "IMAGE_URL", Value: *v1beta1.NewStructuredValues("img3")}, - {Name: "IMAGE_DIGEST", Value: *v1beta1.NewStructuredValues(digest1)}, - {Name: "img4_IMAGE_URL", Value: *v1beta1.NewStructuredValues("img4")}, - {Name: "img5_IMAGE_DIGEST", Value: *v1beta1.NewStructuredValues("sha123:abc")}, - {Name: "empty_str_IMAGE_DIGEST", Value: *v1beta1.NewStructuredValues("")}, - {Name: "empty_str_IMAGE_URL", Value: *v1beta1.NewStructuredValues("")}, + tr := &v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Results: []v1.TaskRunResult{ + {Name: "img1_IMAGE_URL", Value: *v1.NewStructuredValues("img1")}, + {Name: "img1_IMAGE_DIGEST", Value: *v1.NewStructuredValues(digest1)}, + {Name: "img2_IMAGE_URL", Value: *v1.NewStructuredValues("img2")}, + {Name: "img2_IMAGE_DIGEST", Value: *v1.NewStructuredValues(digest2)}, + {Name: "IMAGE_URL", Value: *v1.NewStructuredValues("img3")}, + {Name: "IMAGE_DIGEST", Value: *v1.NewStructuredValues(digest1)}, + {Name: "img4_IMAGE_URL", Value: *v1.NewStructuredValues("img4")}, + {Name: "img5_IMAGE_DIGEST", Value: *v1.NewStructuredValues("sha123:abc")}, + {Name: "empty_str_IMAGE_DIGEST", Value: *v1.NewStructuredValues("")}, + {Name: "empty_str_IMAGE_URL", Value: *v1.NewStructuredValues("")}, }, }, }, } - obj := objects.NewTaskRunObject(tr) + obj := objects.NewTaskRunObjectV1(tr) want := []interface{}{ 
createDigest(t, fmt.Sprintf("img1@%s", digest1)), createDigest(t, fmt.Sprintf("img2@%s", digest2)), @@ -349,23 +365,23 @@ func TestExtractOCIImagesFromResults(t *testing.T) { } func TestExtractSignableTargetFromResults(t *testing.T) { - tr := &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskRunResults: []v1beta1.TaskRunResult{ - {Name: "mvn1_ARTIFACT_URI", Value: *v1beta1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, - {Name: "mvn1_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues(digest1)}, - {Name: "mvn1_pom_ARTIFACT_URI", Value: *v1beta1.NewStructuredValues("com.google.guava:guava:31.0-jre.pom")}, - {Name: "mvn1_pom_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues(digest2)}, - {Name: "mvn1_src_ARTIFACT_URI", Value: *v1beta1.NewStructuredValues("com.google.guava:guava:31.0-jre-sources.jar")}, - {Name: "mvn1_src_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues(digest3)}, - {Name: "mvn2_ARTIFACT_URI", Value: *v1beta1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/a.b.c:d:1.0-jre")}, - {Name: "mvn2_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues(digest4)}, - {Name: "ARTIFACT_URI", Value: *v1beta1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/empty_prefix")}, - {Name: "ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues(digest1)}, - {Name: "miss_target_name_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues(digest1)}, - {Name: "wrong_digest_format_ARTIFACT_URI", Value: *v1beta1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/wrong_digest_format")}, - {Name: "wrong_digest_format_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues("abc")}, + tr := &v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + 
Results: []v1.TaskRunResult{ + {Name: "mvn1_ARTIFACT_URI", Value: *v1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, + {Name: "mvn1_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues(digest1)}, + {Name: "mvn1_pom_ARTIFACT_URI", Value: *v1.NewStructuredValues("com.google.guava:guava:31.0-jre.pom")}, + {Name: "mvn1_pom_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues(digest2)}, + {Name: "mvn1_src_ARTIFACT_URI", Value: *v1.NewStructuredValues("com.google.guava:guava:31.0-jre-sources.jar")}, + {Name: "mvn1_src_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues(digest3)}, + {Name: "mvn2_ARTIFACT_URI", Value: *v1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/a.b.c:d:1.0-jre")}, + {Name: "mvn2_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues(digest4)}, + {Name: "ARTIFACT_URI", Value: *v1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/empty_prefix")}, + {Name: "ARTIFACT_DIGEST", Value: *v1.NewStructuredValues(digest1)}, + {Name: "miss_target_name_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues(digest1)}, + {Name: "wrong_digest_format_ARTIFACT_URI", Value: *v1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/wrong_digest_format")}, + {Name: "wrong_digest_format_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues("abc")}, }, }, }, @@ -378,7 +394,7 @@ func TestExtractSignableTargetFromResults(t *testing.T) { {URI: "projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/empty_prefix", Digest: digest1}, } ctx := logtesting.TestContextWithLogger(t) - got := ExtractSignableTargetFromResults(ctx, objects.NewTaskRunObject(tr)) + got := ExtractSignableTargetFromResults(ctx, objects.NewTaskRunObjectV1(tr)) sort.Slice(got, func(i, j int) bool { return got[i].URI < got[j].URI }) @@ -391,13 +407,13 @@ func 
TestExtractSignableTargetFromResults(t *testing.T) { } func TestExtractStructuredTargetFromResults(t *testing.T) { - tr := &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskRunResults: []v1beta1.TaskRunResult{ + tr := &v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Results: []v1.TaskRunResult{ { Name: "mvn1_pkg" + "_" + ArtifactsOutputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre", "digest": digest1, "signable_type": "", @@ -405,7 +421,7 @@ func TestExtractStructuredTargetFromResults(t *testing.T) { }, { Name: "mvn1_pom_sha512" + "_" + ArtifactsOutputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "com.google.guava:guava:31.0-jre.pom", "digest": digest2, "signable_type": "", @@ -413,56 +429,56 @@ func TestExtractStructuredTargetFromResults(t *testing.T) { }, { Name: "img1_input" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": digest3, }), }, { Name: "img2_input_sha1" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": digest_sha1, }), }, { Name: "img2_input_incorrect_sha1" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": digest_incorrect_sha1, }), }, { Name: "img3_input_sha384" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": digest_sha384, }), }, { Name: "img3_input_incorrect_sha384" + "_" + 
ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": digest_incorrect_sha384, }), }, { Name: "img4_input_sha512" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": digest_sha512, }), }, { Name: "img4_input_incorrect_sha512" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": digest_incorrect_sha512, }), }, { Name: "img2_input_no_digest" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/foo", "digest": "", }), @@ -479,7 +495,7 @@ func TestExtractStructuredTargetFromResults(t *testing.T) { {URI: "gcr.io/foo/bar", Digest: digest_sha512}, } ctx := logtesting.TestContextWithLogger(t) - gotInputs := ExtractStructuredTargetFromResults(ctx, objects.NewTaskRunObject(tr), ArtifactsInputsResultName) + gotInputs := ExtractStructuredTargetFromResults(ctx, objects.NewTaskRunObjectV1(tr), ArtifactsInputsResultName) if diff := cmp.Diff(gotInputs, wantInputs, cmpopts.SortSlices(func(x, y *StructuredSignable) bool { return x.Digest < y.Digest })); diff != "" { t.Errorf("Inputs are not as expected: %v", diff) } @@ -488,7 +504,7 @@ func TestExtractStructuredTargetFromResults(t *testing.T) { {URI: "projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre", Digest: digest1}, {URI: "com.google.guava:guava:31.0-jre.pom", Digest: digest2}, } - gotOutputs := ExtractStructuredTargetFromResults(ctx, objects.NewTaskRunObject(tr), ArtifactsOutputsResultName) + gotOutputs := ExtractStructuredTargetFromResults(ctx, objects.NewTaskRunObjectV1(tr), ArtifactsOutputsResultName) opts := append(ignore, cmpopts.SortSlices(func(x, y *StructuredSignable) 
bool { return x.Digest < y.Digest })) if diff := cmp.Diff(gotOutputs, wantOutputs, opts...); diff != "" { t.Error(diff) @@ -496,27 +512,27 @@ func TestExtractStructuredTargetFromResults(t *testing.T) { } func TestRetrieveMaterialsFromStructuredResults(t *testing.T) { - tr := &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskRunResults: []v1beta1.TaskRunResult{ + tr := &v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Results: []v1.TaskRunResult{ { Name: "img1_input" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": OCIScheme + "gcr.io/foo/bar", "digest": "sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b7", }), }, { Name: "img2_input_no_digest" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": OCIScheme + "gcr.io/foo/foo", "digest": "", }), }, { Name: "img2_input_invalid_digest" + "_" + ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": OCIScheme + "gcr.io/foo/foo", "digest": "sha:123", }), @@ -532,7 +548,7 @@ func TestRetrieveMaterialsFromStructuredResults(t *testing.T) { }, } ctx := logtesting.TestContextWithLogger(t) - gotMaterials := RetrieveMaterialsFromStructuredResults(ctx, objects.NewTaskRunObject(tr), ArtifactsInputsResultName) + gotMaterials := RetrieveMaterialsFromStructuredResults(ctx, objects.NewTaskRunObjectV1(tr), ArtifactsInputsResultName) if diff := cmp.Diff(gotMaterials, wantMaterials, ignore...); diff != "" { t.Fatalf("Materials not the same %s", diff) @@ -552,7 +568,7 @@ func TestValidateResults(t *testing.T) { categoryMarker: ArtifactsOutputsResultName, obj: objects.Result{ Name: "valid_result-ARTIFACT_OUTPUTS", - Value: v1beta1.ParamValue{ + Value: v1.ParamValue{ ObjectVal: map[string]string{ 
"uri": "gcr.io/foo/bar", "digest": digest3, @@ -567,7 +583,7 @@ func TestValidateResults(t *testing.T) { categoryMarker: ArtifactsOutputsResultName, obj: objects.Result{ Name: "missing_digest-ARTIFACT_OUTPUTS", - Value: v1beta1.ParamValue{ + Value: v1.ParamValue{ ObjectVal: map[string]string{ "uri": "gcr.io/foo/bar", }, @@ -581,7 +597,7 @@ func TestValidateResults(t *testing.T) { categoryMarker: ArtifactsOutputsResultName, obj: objects.Result{ Name: "missing_digest-ARTIFACT_OUTPUTS", - Value: v1beta1.ParamValue{ + Value: v1.ParamValue{ ObjectVal: map[string]string{ "digest": digest3, }, @@ -595,7 +611,7 @@ func TestValidateResults(t *testing.T) { categoryMarker: ArtifactsOutputsResultName, obj: objects.Result{ Name: "missing_digest-ARTIFACT_OUTPUTS", - Value: v1beta1.ParamValue{ + Value: v1.ParamValue{ ObjectVal: map[string]string{ "uri": "gcr.io/foo/bar", "digest": "", @@ -610,7 +626,7 @@ func TestValidateResults(t *testing.T) { categoryMarker: ArtifactsOutputsResultName, obj: objects.Result{ Name: "missing_digest-ARTIFACTs_OUTPUTS", - Value: v1beta1.ParamValue{ + Value: v1.ParamValue{ ObjectVal: map[string]string{ "uri": "gcr.io/foo/bar", "digest": digest3, diff --git a/pkg/chains/annotations_test.go b/pkg/chains/annotations_test.go index d8dd0ed717..e697b5752e 100644 --- a/pkg/chains/annotations_test.go +++ b/pkg/chains/annotations_test.go @@ -18,7 +18,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/test/tekton" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" rtesting "knative.dev/pkg/reconciler/testing" @@ -78,7 +78,7 @@ func TestReconciled(t *testing.T) { c := fakepipelineclient.Get(ctx) // Test TaskRun - taskRun := objects.NewTaskRunObject(&v1beta1.TaskRun{ + taskRun := objects.NewTaskRunObjectV1(&v1.TaskRun{ ObjectMeta: 
metav1.ObjectMeta{ Annotations: map[string]string{ ChainsAnnotation: tt.annotation, @@ -87,7 +87,7 @@ func TestReconciled(t *testing.T) { }) tekton.CreateObject(t, ctx, c, taskRun) - cachedTaskRun := objects.NewTaskRunObject(&v1beta1.TaskRun{ + cachedTaskRun := objects.NewTaskRunObjectV1(&v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ ChainsAnnotation: tt.latestAnnotation, @@ -101,7 +101,7 @@ func TestReconciled(t *testing.T) { } // Test PipelineRun - pipelineRun := objects.NewPipelineRunObject(&v1beta1.PipelineRun{ + pipelineRun := objects.NewPipelineRunObjectV1(&v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ ChainsAnnotation: tt.annotation, @@ -110,7 +110,7 @@ func TestReconciled(t *testing.T) { }) tekton.CreateObject(t, ctx, c, pipelineRun) - cachedPipelineRun := objects.NewPipelineRunObject(&v1beta1.PipelineRun{ + cachedPipelineRun := objects.NewPipelineRunObjectV1(&v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ ChainsAnnotation: tt.latestAnnotation, @@ -133,12 +133,12 @@ func TestMarkSigned(t *testing.T) { }{ { name: "mark taskrun", - object: objects.NewTaskRunObject(&v1beta1.TaskRun{ + object: objects.NewTaskRunObjectV1(&v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Name: "my-taskrun", }, - Spec: v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{ + Spec: v1.TaskRunSpec{ + TaskRef: &v1.TaskRef{ Name: "foo", }, }, @@ -146,12 +146,12 @@ func TestMarkSigned(t *testing.T) { }, { name: "mark pipelinerun", - object: objects.NewPipelineRunObject(&v1beta1.PipelineRun{ + object: objects.NewPipelineRunObjectV1(&v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "my-pipelinerun", }, - Spec: v1beta1.PipelineRunSpec{ - PipelineRef: &v1beta1.PipelineRef{ + Spec: v1.PipelineRunSpec{ + PipelineRef: &v1.PipelineRef{ Name: "foo", }, }, @@ -212,13 +212,13 @@ func TestMarkFailed(t *testing.T) { }{ { name: "mark taskrun failed", - object: objects.NewTaskRunObject(&v1beta1.TaskRun{ + object: 
objects.NewTaskRunObjectV1(&v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Name: "my-taskrun", Annotations: map[string]string{RetryAnnotation: "3"}, }, - Spec: v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{ + Spec: v1.TaskRunSpec{ + TaskRef: &v1.TaskRef{ Name: "foo", }, }, @@ -226,13 +226,13 @@ func TestMarkFailed(t *testing.T) { }, { name: "mark pipelinerun failed", - object: objects.NewPipelineRunObject(&v1beta1.PipelineRun{ + object: objects.NewPipelineRunObjectV1(&v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "my-pipelinerun", Annotations: map[string]string{RetryAnnotation: "3"}, }, - Spec: v1beta1.PipelineRunSpec{ - PipelineRef: &v1beta1.PipelineRef{ + Spec: v1.PipelineRunSpec{ + PipelineRef: &v1.PipelineRef{ Name: "foo", }, }, @@ -294,23 +294,23 @@ func TestRetryAvailble(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { // test taskrun - tr := &v1beta1.TaskRun{ + tr := &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Annotations: test.annotations, }, } - trObj := objects.NewTaskRunObject(tr) + trObj := objects.NewTaskRunObjectV1(tr) got := RetryAvailable(trObj) if got != test.expected { t.Fatalf("RetryAvailble() got %v expected %v", got, test.expected) } // test pipelinerun - pr := &v1beta1.PipelineRun{ + pr := &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Annotations: test.annotations, }, } - prObj := objects.NewPipelineRunObject(pr) + prObj := objects.NewPipelineRunObjectV1(pr) got = RetryAvailable(prObj) if got != test.expected { t.Fatalf("RetryAvailble() got %v expected %v", got, test.expected) @@ -326,13 +326,13 @@ func TestAddRetry(t *testing.T) { }{ { name: "add retry to taskrun", - object: objects.NewTaskRunObject(&v1beta1.TaskRun{ + object: objects.NewTaskRunObjectV1(&v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{Name: "mytaskrun"}, }), }, { name: "add retry to pipelinerun", - object: objects.NewPipelineRunObject(&v1beta1.PipelineRun{ + object: objects.NewPipelineRunObjectV1(&v1.PipelineRun{ ObjectMeta: 
metav1.ObjectMeta{Name: "mypipelinerun"}, }), }, diff --git a/pkg/chains/formats/slsa/attest/attest.go b/pkg/chains/formats/slsa/attest/attest.go index e0bc380d38..9170806138 100644 --- a/pkg/chains/formats/slsa/attest/attest.go +++ b/pkg/chains/formats/slsa/attest/attest.go @@ -23,6 +23,7 @@ import ( slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" "github.com/tektoncd/chains/pkg/artifacts" "github.com/tektoncd/chains/pkg/chains/objects" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" ) @@ -59,7 +60,7 @@ func Step(step *v1beta1.Step, stepState *v1beta1.StepState) StepAttestation { } func Invocation(obj objects.TektonObject, params []v1beta1.Param, paramSpecs []v1beta1.ParamSpec) slsa.ProvenanceInvocation { - var source *v1beta1.RefSource + var source *v1.RefSource if p := obj.GetProvenance(); p != nil { source = p.RefSource } @@ -108,7 +109,7 @@ func Invocation(obj objects.TektonObject, params []v1beta1.Param, paramSpecs []v return i } -func convertConfigSource(source *v1beta1.RefSource) slsa.ConfigSource { +func convertConfigSource(source *v1.RefSource) slsa.ConfigSource { if source == nil { return slsa.ConfigSource{} } diff --git a/pkg/chains/formats/slsa/extract/extract.go b/pkg/chains/formats/slsa/extract/extract.go index 7a2d093c87..2cc4f4861b 100644 --- a/pkg/chains/formats/slsa/extract/extract.go +++ b/pkg/chains/formats/slsa/extract/extract.go @@ -26,9 +26,11 @@ import ( "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" "github.com/tektoncd/chains/internal/backport" "github.com/tektoncd/chains/pkg/artifacts" + extractv1beta1 "github.com/tektoncd/chains/pkg/chains/formats/slsa/extract/v1beta1" "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/artifact" "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" "github.com/tektoncd/chains/pkg/chains/objects" + v1 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "knative.dev/pkg/logging" ) @@ -46,10 +48,14 @@ func SubjectDigests(ctx context.Context, obj objects.TektonObject, slsaconfig *s var subjects []intoto.Subject switch obj.GetObject().(type) { - case *v1beta1.PipelineRun: + case *v1.PipelineRun: subjects = subjectsFromPipelineRun(ctx, obj, slsaconfig) - case *v1beta1.TaskRun: + case *v1.TaskRun: subjects = subjectsFromTektonObject(ctx, obj) + case *v1beta1.PipelineRun: + subjects = extractv1beta1.SubjectsFromPipelineRunV1Beta1(ctx, obj, slsaconfig) + case *v1beta1.TaskRun: + subjects = extractv1beta1.SubjectsFromTektonObjectV1Beta1(ctx, obj) } return subjects @@ -67,7 +73,7 @@ func subjectsFromPipelineRun(ctx context.Context, obj objects.TektonObject, slsa // If deep inspection is enabled, collect subjects from child taskruns var result []intoto.Subject - pro := obj.(*objects.PipelineRunObject) + pro := obj.(*objects.PipelineRunObjectV1) pSpec := pro.Status.PipelineSpec if pSpec != nil { @@ -135,42 +141,46 @@ func subjectsFromTektonObject(ctx context.Context, obj objects.TektonObject) []i }) } - // Check if object is a Taskrun, if so search for images used in PipelineResources - // Otherwise object is a PipelineRun, where Pipelineresources are not relevant. - // PipelineResources have been deprecated so their support has been left out of - // the POC for TEP-84 - // More info: https://tekton.dev/docs/pipelines/resources/ - tr, ok := obj.GetObject().(*v1beta1.TaskRun) - if !ok || tr.Spec.Resources == nil { - return subjects - } + if trV1, ok := obj.GetObject().(*v1.TaskRun); ok { + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, trV1); err == nil { + // Check if object is a Taskrun, if so search for images used in PipelineResources + // Otherwise object is a PipelineRun, where Pipelineresources are not relevant. 
+ // PipelineResources have been deprecated so their support has been left out of + // the POC for TEP-84 + // More info: https://tekton.dev/docs/pipelines/resources/ + if !ok || trV1Beta1.Spec.Resources == nil { //nolint:staticcheck + return subjects + } - // go through resourcesResult - for _, output := range tr.Spec.Resources.Outputs { - name := output.Name - if output.PipelineResourceBinding.ResourceSpec == nil { - continue - } - // similarly, we could do this for other pipeline resources or whatever thing replaces them - if output.PipelineResourceBinding.ResourceSpec.Type == backport.PipelineResourceTypeImage { - // get the url and digest, and save as a subject - var url, digest string - for _, s := range tr.Status.ResourcesResult { - if s.ResourceName == name { - if s.Key == "url" { - url = s.Value - } - if s.Key == "digest" { - digest = s.Value + // go through resourcesResult + for _, output := range trV1Beta1.Spec.Resources.Outputs { //nolint:staticcheck + name := output.Name + if output.PipelineResourceBinding.ResourceSpec == nil { + continue + } + // similarly, we could do this for other pipeline resources or whatever thing replaces them + if output.PipelineResourceBinding.ResourceSpec.Type == backport.PipelineResourceTypeImage { + // get the url and digest, and save as a subject + var url, digest string + for _, s := range trV1Beta1.Status.ResourcesResult { + if s.ResourceName == name { + if s.Key == "url" { + url = s.Value + } + if s.Key == "digest" { + digest = s.Value + } + } } + subjects = artifact.AppendSubjects(subjects, intoto.Subject{ + Name: url, + Digest: common.DigestSet{ + "sha256": strings.TrimPrefix(digest, "sha256:"), + }, + }) } } - subjects = artifact.AppendSubjects(subjects, intoto.Subject{ - Name: url, - Digest: common.DigestSet{ - "sha256": strings.TrimPrefix(digest, "sha256:"), - }, - }) } } diff --git a/pkg/chains/formats/slsa/extract/extract_test.go b/pkg/chains/formats/slsa/extract/extract_test.go index 9c7b164679..583a727170 
100644 --- a/pkg/chains/formats/slsa/extract/extract_test.go +++ b/pkg/chains/formats/slsa/extract/extract_test.go @@ -28,7 +28,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/compare" "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" "github.com/tektoncd/chains/pkg/chains/objects" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" logtesting "knative.dev/pkg/logging/testing" ) @@ -208,7 +208,7 @@ func TestPipelineRunObserveModeForSubjects(t *testing.T) { { name: "deep inspection enabled: pipelinerun and taskrun have duplicated results", pro: createProWithTaskRunResults( - createProWithPipelineResults(map[string]string{artifactURL1: "sha256:" + artifactDigest1}).(*objects.PipelineRunObject), + createProWithPipelineResults(map[string]string{artifactURL1: "sha256:" + artifactDigest1}).(*objects.PipelineRunObjectV1), []artifact{ {uri: artifactURL1, digest: "sha256:" + artifactDigest1}, }), @@ -228,7 +228,7 @@ func TestPipelineRunObserveModeForSubjects(t *testing.T) { { name: "deep inspection enabled: pipelinerun and taskrun have different results", pro: createProWithTaskRunResults( - createProWithPipelineResults(map[string]string{artifactURL1: "sha256:" + artifactDigest1}).(*objects.PipelineRunObject), + createProWithPipelineResults(map[string]string{artifactURL1: "sha256:" + artifactDigest1}).(*objects.PipelineRunObjectV1), []artifact{ {uri: artifactURL2, digest: "sha256:" + artifactDigest2}, }), @@ -272,21 +272,21 @@ func TestPipelineRunObserveModeForSubjects(t *testing.T) { } func createTaskRunObjectWithResults(results map[string]string) objects.TektonObject { - trResults := []v1beta1.TaskRunResult{} + trResults := []v1.TaskRunResult{} prefix := 0 for url, digest := range results { trResults = append(trResults, - v1beta1.TaskRunResult{Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: 
*v1beta1.NewStructuredValues(digest)}, - v1beta1.TaskRunResult{Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1beta1.NewStructuredValues(url)}, + v1.TaskRunResult{Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: *v1.NewStructuredValues(digest)}, + v1.TaskRunResult{Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1.NewStructuredValues(url)}, ) prefix++ } - return objects.NewTaskRunObject( - &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskRunResults: trResults, + return objects.NewTaskRunObjectV1( + &v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Results: trResults, }, }, }, @@ -294,21 +294,21 @@ func createTaskRunObjectWithResults(results map[string]string) objects.TektonObj } func createProWithPipelineResults(results map[string]string) objects.TektonObject { - prResults := []v1beta1.PipelineRunResult{} + prResults := []v1.PipelineRunResult{} prefix := 0 for url, digest := range results { prResults = append(prResults, - v1beta1.PipelineRunResult{Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: *v1beta1.NewStructuredValues(digest)}, - v1beta1.PipelineRunResult{Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1beta1.NewStructuredValues(url)}, + v1.PipelineRunResult{Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: *v1.NewStructuredValues(digest)}, + v1.PipelineRunResult{Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1.NewStructuredValues(url)}, ) prefix++ } - return objects.NewPipelineRunObject( - &v1beta1.PipelineRun{ - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - PipelineResults: prResults, + return objects.NewPipelineRunObjectV1( + &v1.PipelineRun{ + Status: v1.PipelineRunStatus{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + Results: prResults, }, }, }, @@ -323,19 +323,19 @@ type artifact struct { // create a child taskrun for each result // //nolint:all -func 
createProWithTaskRunResults(pro *objects.PipelineRunObject, results []artifact) objects.TektonObject { +func createProWithTaskRunResults(pro *objects.PipelineRunObjectV1, results []artifact) objects.TektonObject { if pro == nil { - pro = objects.NewPipelineRunObject(&v1beta1.PipelineRun{ - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - PipelineSpec: &v1beta1.PipelineSpec{}, + pro = objects.NewPipelineRunObjectV1(&v1.PipelineRun{ + Status: v1.PipelineRunStatus{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + PipelineSpec: &v1.PipelineSpec{}, }, }, }) } if pro.Status.PipelineSpec == nil { - pro.Status.PipelineSpec = &v1beta1.PipelineSpec{} + pro.Status.PipelineSpec = &v1.PipelineSpec{} } // create child taskruns with results and pipelinetask @@ -343,21 +343,21 @@ func createProWithTaskRunResults(pro *objects.PipelineRunObject, results []artif for _, r := range results { // simulate child taskruns pipelineTaskName := fmt.Sprintf("task-%d", prefix) - tr := &v1beta1.TaskRun{ + tr := &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{objects.PipelineTaskLabel: pipelineTaskName}}, - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ CompletionTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, - TaskRunResults: []v1beta1.TaskRunResult{ - {Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: *v1beta1.NewStructuredValues(r.digest)}, - {Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1beta1.NewStructuredValues(r.uri)}, + Results: []v1.TaskRunResult{ + {Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: *v1.NewStructuredValues(r.digest)}, + {Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1.NewStructuredValues(r.uri)}, }, }, }, } pro.AppendTaskRun(tr) - pro.Status.PipelineSpec.Tasks = append(pro.Status.PipelineSpec.Tasks, v1beta1.PipelineTask{Name: 
pipelineTaskName}) + pro.Status.PipelineSpec.Tasks = append(pro.Status.PipelineSpec.Tasks, v1.PipelineTask{Name: pipelineTaskName}) prefix++ } diff --git a/pkg/chains/formats/slsa/extract/v1beta1/extract.go b/pkg/chains/formats/slsa/extract/v1beta1/extract.go new file mode 100644 index 0000000000..cb630ba26f --- /dev/null +++ b/pkg/chains/formats/slsa/extract/v1beta1/extract.go @@ -0,0 +1,192 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + intoto "github.com/in-toto/in-toto-golang/in_toto" + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + "github.com/tektoncd/chains/internal/backport" + "github.com/tektoncd/chains/pkg/artifacts" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/artifact" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" + "github.com/tektoncd/chains/pkg/chains/objects" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "knative.dev/pkg/logging" +) + +// SubjectDigests returns software artifacts produced from the TaskRun/PipelineRun object +// in the form of standard subject field of intoto statement. +// The type hinting fields expected in results help identify the generated software artifacts. +// Valid type hinting fields must: +// - have suffix `IMAGE_URL` & `IMAGE_DIGEST` or `ARTIFACT_URI` & `ARTIFACT_DIGEST` pair. 
+// - the `*_DIGEST` field must be in the format of ":" where the algorithm must be "sha256" and actual sha must be valid per https://github.com/opencontainers/image-spec/blob/main/descriptor.md#sha-256. +// - the `*_URL` or `*_URI` fields cannot be empty. +// +//nolint:all +func SubjectDigests(ctx context.Context, obj objects.TektonObject, slsaconfig *slsaconfig.SlsaConfig) []intoto.Subject { + var subjects []intoto.Subject + + switch obj.GetObject().(type) { + case *v1beta1.PipelineRun: + subjects = SubjectsFromPipelineRunV1Beta1(ctx, obj, slsaconfig) + case *v1beta1.TaskRun: + subjects = SubjectsFromTektonObjectV1Beta1(ctx, obj) + } + + return subjects +} + +func SubjectsFromPipelineRunV1Beta1(ctx context.Context, obj objects.TektonObject, slsaconfig *slsaconfig.SlsaConfig) []intoto.Subject { + prSubjects := SubjectsFromTektonObjectV1Beta1(ctx, obj) + + // If deep inspection is not enabled, just return subjects observed on the pipelinerun level + if !slsaconfig.DeepInspectionEnabled { + return prSubjects + } + + logger := logging.FromContext(ctx) + // If deep inspection is enabled, collect subjects from child taskruns + var result []intoto.Subject + + pro := obj.(*objects.PipelineRunObjectV1Beta1) + + pSpec := pro.Status.PipelineSpec + if pSpec != nil { + pipelineTasks := append(pSpec.Tasks, pSpec.Finally...) + for _, t := range pipelineTasks { + tr := pro.GetTaskRunFromTask(t.Name) + // Ignore Tasks that did not execute during the PipelineRun. + if tr == nil || tr.Status.CompletionTime == nil { + logger.Infof("taskrun status not found for task %s", t.Name) + continue + } + trSubjects := SubjectsFromTektonObjectV1Beta1(ctx, tr) + result = artifact.AppendSubjects(result, trSubjects...) + } + } + + // also add subjects observed from pipelinerun level with duplication removed + result = artifact.AppendSubjects(result, prSubjects...) 
+ + return result +} + +func SubjectsFromTektonObjectV1Beta1(ctx context.Context, obj objects.TektonObject) []intoto.Subject { + logger := logging.FromContext(ctx) + var subjects []intoto.Subject + + imgs := artifacts.ExtractOCIImagesFromResults(ctx, obj) + for _, i := range imgs { + if d, ok := i.(name.Digest); ok { + subjects = artifact.AppendSubjects(subjects, intoto.Subject{ + Name: d.Repository.Name(), + Digest: common.DigestSet{ + "sha256": strings.TrimPrefix(d.DigestStr(), "sha256:"), + }, + }) + } + } + + sts := artifacts.ExtractSignableTargetFromResults(ctx, obj) + for _, obj := range sts { + splits := strings.Split(obj.Digest, ":") + if len(splits) != 2 { + logger.Errorf("Digest %s should be in the format of: algorthm:abc", obj.Digest) + continue + } + subjects = artifact.AppendSubjects(subjects, intoto.Subject{ + Name: obj.URI, + Digest: common.DigestSet{ + splits[0]: splits[1], + }, + }) + } + + ssts := artifacts.ExtractStructuredTargetFromResults(ctx, obj, artifacts.ArtifactsOutputsResultName) + for _, s := range ssts { + splits := strings.Split(s.Digest, ":") + alg := splits[0] + digest := splits[1] + subjects = artifact.AppendSubjects(subjects, intoto.Subject{ + Name: s.URI, + Digest: common.DigestSet{ + alg: digest, + }, + }) + } + + // Check if object is a Taskrun, if so search for images used in PipelineResources + // Otherwise object is a PipelineRun, where Pipelineresources are not relevant. 
+ // PipelineResources have been deprecated so their support has been left out of + // the POC for TEP-84 + // More info: https://tekton.dev/docs/pipelines/resources/ + tr, ok := obj.GetObject().(*v1beta1.TaskRun) //nolint:staticcheck + if !ok || tr.Spec.Resources == nil { //nolint:staticcheck + return subjects + } + + // go through resourcesResult + for _, output := range tr.Spec.Resources.Outputs { //nolint:staticcheck + name := output.Name + if output.PipelineResourceBinding.ResourceSpec == nil { + continue + } + // similarly, we could do this for other pipeline resources or whatever thing replaces them + if output.PipelineResourceBinding.ResourceSpec.Type == backport.PipelineResourceTypeImage { + // get the url and digest, and save as a subject + var url, digest string + for _, s := range tr.Status.ResourcesResult { + if s.ResourceName == name { + if s.Key == "url" { + url = s.Value + } + if s.Key == "digest" { + digest = s.Value + } + } + } + subjects = artifact.AppendSubjects(subjects, intoto.Subject{ + Name: url, + Digest: common.DigestSet{ + "sha256": strings.TrimPrefix(digest, "sha256:"), + }, + }) + } + } + return subjects +} + +// RetrieveAllArtifactURIs returns all the URIs of the software artifacts produced from the run object. +// - It first extracts intoto subjects from run object results and converts the subjects +// to a slice of string URIs in the format of "NAME" + "@" + "ALGORITHM" + ":" + "DIGEST". +// - If no subjects could be extracted from results, then an empty slice is returned. 
+func RetrieveAllArtifactURIs(ctx context.Context, obj objects.TektonObject, deepInspectionEnabled bool) []string { + result := []string{} + subjects := SubjectDigests(ctx, obj, &slsaconfig.SlsaConfig{DeepInspectionEnabled: deepInspectionEnabled}) + + for _, s := range subjects { + for algo, digest := range s.Digest { + result = append(result, fmt.Sprintf("%s@%s:%s", s.Name, algo, digest)) + } + } + return result +} diff --git a/pkg/chains/formats/slsa/extract/v1beta1/extract_test.go b/pkg/chains/formats/slsa/extract/v1beta1/extract_test.go new file mode 100644 index 0000000000..0e1cd2dd0c --- /dev/null +++ b/pkg/chains/formats/slsa/extract/v1beta1/extract_test.go @@ -0,0 +1,365 @@ +/* +Copyright 2022 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1_test + +import ( + "fmt" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + intoto "github.com/in-toto/in-toto-golang/in_toto" + extractv1beta1 "github.com/tektoncd/chains/pkg/chains/formats/slsa/extract/v1beta1" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/compare" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" + "github.com/tektoncd/chains/pkg/chains/objects" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logtesting "knative.dev/pkg/logging/testing" +) + +const ( + artifactURL1 = "gcr.io/test/kaniko-chains1" + artifactDigest1 = "a2e500bebfe16cf12fc56316ba72c645e1d29054541dc1ab6c286197434170a9" + artifactURL2 = "us-central1-maven.pkg.dev/test/java" + artifactDigest2 = "b2e500bebfe16cf12fc56316ba72c645e1d29054541dc1ab6c286197434170a9" +) + +func TestSubjectDigestsAndRetrieveAllArtifactURIs(t *testing.T) { + var tests = []struct { + name string + // a map of url:digest pairs for type hinting results + results map[string]string + wantSubjects []intoto.Subject + wantFullURLs []string + }{ + { + name: "valid type hinting result fields", + results: map[string]string{ + artifactURL1: "sha256:" + artifactDigest1, + artifactURL2: "sha256:" + artifactDigest2, + }, + wantSubjects: []intoto.Subject{ + { + Name: artifactURL1, + Digest: map[string]string{ + "sha256": artifactDigest1, + }, + }, + { + Name: artifactURL2, + Digest: map[string]string{ + "sha256": artifactDigest2, + }, + }, + }, + wantFullURLs: []string{ + fmt.Sprintf("%s@sha256:%s", artifactURL1, artifactDigest1), + fmt.Sprintf("%s@sha256:%s", artifactURL2, artifactDigest2), + }, + }, + { + name: "invalid/missing digest algorithm name", + results: map[string]string{ + artifactURL1: "sha1:" + artifactDigest1, + artifactURL2: artifactDigest2, + }, + wantSubjects: nil, + wantFullURLs: []string{}, + }, + { + name: "invalid digest sha", + 
results: map[string]string{ + artifactURL1: "sha256:a123", + }, + wantSubjects: nil, + wantFullURLs: []string{}, + }, + { + name: "invalid url value", + results: map[string]string{ + "": "sha256:" + artifactDigest1, + }, + wantSubjects: nil, + wantFullURLs: []string{}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + // test both taskrun object and pipelinerun object + runObjects := []objects.TektonObject{ + createTaskRunObjectV1Beta1WithResults(tc.results), + createProWithPipelineResults(tc.results), + } + for _, o := range runObjects { + gotSubjects := extractv1beta1.SubjectDigests(ctx, o, &slsaconfig.SlsaConfig{DeepInspectionEnabled: false}) + if diff := cmp.Diff(tc.wantSubjects, gotSubjects, compare.SubjectCompareOption()); diff != "" { + t.Errorf("Wrong subjects extracted, diff=%s", diff) + } + + gotURIs := extractv1beta1.RetrieveAllArtifactURIs(ctx, o, false) + if diff := cmp.Diff(tc.wantFullURLs, gotURIs, cmpopts.SortSlices(func(x, y string) bool { return x < y })); diff != "" { + t.Errorf("Wrong URIs extracted, diff=%s", diff) + } + } + + }) + } +} + +func TestPipelineRunObserveModeForSubjects(t *testing.T) { + var tests = []struct { + name string + pro objects.TektonObject + deepInspectionEnabled bool + wantSubjects []intoto.Subject + wantFullURLs []string + }{ + { + name: "deep inspection disabled", + pro: createProWithPipelineResults(map[string]string{artifactURL1: "sha256:" + artifactDigest1}), + deepInspectionEnabled: false, + wantSubjects: []intoto.Subject{ + { + Name: artifactURL1, + Digest: map[string]string{ + "sha256": artifactDigest1, + }, + }, + }, + wantFullURLs: []string{fmt.Sprintf("%s@sha256:%s", artifactURL1, artifactDigest1)}, + }, + { + name: "deep inspection enabled: no duplication", + pro: createProWithTaskRunResults(nil, []artifact{{uri: artifactURL2, digest: "sha256:" + artifactDigest2}}), + deepInspectionEnabled: true, + wantSubjects: []intoto.Subject{ + { + 
Name: artifactURL2, + Digest: map[string]string{ + "sha256": artifactDigest2, + }, + }, + }, + wantFullURLs: []string{fmt.Sprintf("%s@sha256:%s", artifactURL2, artifactDigest2)}, + }, + { + name: "deep inspection enabled: 2 tasks have same uri with different sha256 digests", + pro: createProWithTaskRunResults(nil, []artifact{ + {uri: artifactURL2, digest: "sha256:" + artifactDigest1}, + {uri: artifactURL2, digest: "sha256:" + artifactDigest2}, + }), + deepInspectionEnabled: true, + wantSubjects: []intoto.Subject{ + { + Name: artifactURL2, + Digest: map[string]string{ + "sha256": artifactDigest2, + }, + }, + { + Name: artifactURL2, + Digest: map[string]string{ + "sha256": artifactDigest1, + }, + }, + }, + wantFullURLs: []string{ + fmt.Sprintf("%s@sha256:%s", artifactURL2, artifactDigest1), + fmt.Sprintf("%s@sha256:%s", artifactURL2, artifactDigest2), + }, + }, + { + name: "deep inspection enabled: 2 taskruns have same uri with same sha256 digests", + pro: createProWithTaskRunResults(nil, []artifact{ + {uri: artifactURL2, digest: "sha256:" + artifactDigest2}, + {uri: artifactURL2, digest: "sha256:" + artifactDigest2}, + }), + deepInspectionEnabled: true, + wantSubjects: []intoto.Subject{ + { + Name: artifactURL2, + Digest: map[string]string{ + "sha256": artifactDigest2, + }, + }, + }, + wantFullURLs: []string{ + fmt.Sprintf("%s@sha256:%s", artifactURL2, artifactDigest2), + }, + }, + { + name: "deep inspection enabled: pipelinerun and taskrun have duplicated results", + pro: createProWithTaskRunResults( + createProWithPipelineResults(map[string]string{artifactURL1: "sha256:" + artifactDigest1}).(*objects.PipelineRunObjectV1Beta1), + []artifact{ + {uri: artifactURL1, digest: "sha256:" + artifactDigest1}, + }), + deepInspectionEnabled: true, + wantSubjects: []intoto.Subject{ + { + Name: artifactURL1, + Digest: map[string]string{ + "sha256": artifactDigest1, + }, + }, + }, + wantFullURLs: []string{ + fmt.Sprintf("%s@sha256:%s", artifactURL1, artifactDigest1), + }, + }, + 
{ + name: "deep inspection enabled: pipelinerun and taskrun have different results", + pro: createProWithTaskRunResults( + createProWithPipelineResults(map[string]string{artifactURL1: "sha256:" + artifactDigest1}).(*objects.PipelineRunObjectV1Beta1), + []artifact{ + {uri: artifactURL2, digest: "sha256:" + artifactDigest2}, + }), + deepInspectionEnabled: true, + wantSubjects: []intoto.Subject{ + { + Name: artifactURL1, + Digest: map[string]string{ + "sha256": artifactDigest1, + }, + }, + { + Name: artifactURL2, + Digest: map[string]string{ + "sha256": artifactDigest2, + }, + }, + }, + wantFullURLs: []string{ + fmt.Sprintf("%s@sha256:%s", artifactURL1, artifactDigest1), + fmt.Sprintf("%s@sha256:%s", artifactURL2, artifactDigest2), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + + gotSubjects := extractv1beta1.SubjectDigests(ctx, tc.pro, &slsaconfig.SlsaConfig{DeepInspectionEnabled: tc.deepInspectionEnabled}) + if diff := cmp.Diff(tc.wantSubjects, gotSubjects, compare.SubjectCompareOption()); diff != "" { + t.Errorf("Wrong subjects extracted, diff=%s, %s", diff, gotSubjects) + } + + gotURIs := extractv1beta1.RetrieveAllArtifactURIs(ctx, tc.pro, tc.deepInspectionEnabled) + if diff := cmp.Diff(tc.wantFullURLs, gotURIs, cmpopts.SortSlices(func(x, y string) bool { return x < y })); diff != "" { + t.Errorf("Wrong URIs extracted, diff=%s", diff) + } + }) + } +} + +func createTaskRunObjectV1Beta1WithResults(results map[string]string) objects.TektonObject { + trResults := []v1beta1.TaskRunResult{} + prefix := 0 + for url, digest := range results { + trResults = append(trResults, + v1beta1.TaskRunResult{Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: *v1beta1.NewStructuredValues(digest)}, + v1beta1.TaskRunResult{Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1beta1.NewStructuredValues(url)}, + ) + prefix++ + } + + return objects.NewTaskRunObjectV1Beta1( + &v1beta1.TaskRun{ 
//nolint:staticcheck + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + TaskRunResults: trResults, + }, + }, + }, + ) +} + +func createProWithPipelineResults(results map[string]string) objects.TektonObject { + prResults := []v1beta1.PipelineRunResult{} + prefix := 0 + for url, digest := range results { + prResults = append(prResults, + v1beta1.PipelineRunResult{Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: *v1beta1.NewStructuredValues(digest)}, + v1beta1.PipelineRunResult{Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1beta1.NewStructuredValues(url)}, + ) + prefix++ + } + + return objects.NewPipelineRunObjectV1Beta1( + &v1beta1.PipelineRun{ //nolint:staticcheck + Status: v1beta1.PipelineRunStatus{ + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + PipelineResults: prResults, + }, + }, + }, + ) +} + +type artifact struct { + uri string + digest string +} + +// create a child taskrun for each result +// +//nolint:all +func createProWithTaskRunResults(pro *objects.PipelineRunObjectV1Beta1, results []artifact) objects.TektonObject { + if pro == nil { + pro = objects.NewPipelineRunObjectV1Beta1(&v1beta1.PipelineRun{ //nolint:staticcheck + Status: v1beta1.PipelineRunStatus{ + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + PipelineSpec: &v1beta1.PipelineSpec{}, + }, + }, + }) + } + + if pro.Status.PipelineSpec == nil { + pro.Status.PipelineSpec = &v1beta1.PipelineSpec{} + } + + // create child taskruns with results and pipelinetask + prefix := 0 + for _, r := range results { + // simulate child taskruns + pipelineTaskName := fmt.Sprintf("task-%d", prefix) + tr := &v1beta1.TaskRun{ //nolint:staticcheck + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{objects.PipelineTaskLabel: pipelineTaskName}}, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + CompletionTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, + 
TaskRunResults: []v1beta1.TaskRunResult{ + {Name: fmt.Sprintf("%v_IMAGE_DIGEST", prefix), Value: *v1beta1.NewStructuredValues(r.digest)}, + {Name: fmt.Sprintf("%v_IMAGE_URL", prefix), Value: *v1beta1.NewStructuredValues(r.uri)}, + }, + }, + }, + } + + pro.AppendTaskRun(tr) + pro.Status.PipelineSpec.Tasks = append(pro.Status.PipelineSpec.Tasks, v1beta1.PipelineTask{Name: pipelineTaskName}) + prefix++ + } + + return pro +} diff --git a/pkg/chains/formats/slsa/internal/material/material.go b/pkg/chains/formats/slsa/internal/material/material.go index 864dc14ab4..6c29f4fe93 100644 --- a/pkg/chains/formats/slsa/internal/material/material.go +++ b/pkg/chains/formats/slsa/internal/material/material.go @@ -28,6 +28,8 @@ import ( "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/artifact" "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" "github.com/tektoncd/chains/pkg/chains/objects" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "knative.dev/pkg/logging" ) @@ -37,7 +39,7 @@ const ( ) // TaskMaterials constructs `predicate.materials` section by collecting all the artifacts that influence a taskrun such as source code repo and step&sidecar base images. -func TaskMaterials(ctx context.Context, tro *objects.TaskRunObject) ([]common.ProvenanceMaterial, error) { +func TaskMaterials(ctx context.Context, tro *objects.TaskRunObjectV1) ([]common.ProvenanceMaterial, error) { var mats []common.ProvenanceMaterial // add step images @@ -56,13 +58,16 @@ func TaskMaterials(ctx context.Context, tro *objects.TaskRunObject) ([]common.Pr mats = artifact.AppendMaterials(mats, FromTaskParamsAndResults(ctx, tro)...) - // add task resources - mats = artifact.AppendMaterials(mats, FromTaskResources(ctx, tro)...) 
+ // convert to v1beta1 and add any task resources + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, tro.GetObject().(*v1.TaskRun)); err == nil { + mats = artifact.AppendMaterials(mats, FromTaskResources(ctx, trV1Beta1)...) + } return mats, nil } -func PipelineMaterials(ctx context.Context, pro *objects.PipelineRunObject, slsaconfig *slsaconfig.SlsaConfig) ([]common.ProvenanceMaterial, error) { +func PipelineMaterials(ctx context.Context, pro *objects.PipelineRunObjectV1, slsaconfig *slsaconfig.SlsaConfig) ([]common.ProvenanceMaterial, error) { logger := logging.FromContext(ctx) var mats []common.ProvenanceMaterial if p := pro.Status.Provenance; p != nil && p.RefSource != nil { @@ -113,7 +118,7 @@ func PipelineMaterials(ctx context.Context, pro *objects.PipelineRunObject, slsa } // FromStepImages gets predicate.materials from step images -func FromStepImages(tro *objects.TaskRunObject) ([]common.ProvenanceMaterial, error) { +func FromStepImages(tro *objects.TaskRunObjectV1) ([]common.ProvenanceMaterial, error) { mats := []common.ProvenanceMaterial{} for _, image := range tro.GetStepImages() { m, err := fromImageID(image) @@ -126,7 +131,7 @@ func FromStepImages(tro *objects.TaskRunObject) ([]common.ProvenanceMaterial, er } // FromSidecarImages gets predicate.materials from sidecar images -func FromSidecarImages(tro *objects.TaskRunObject) ([]common.ProvenanceMaterial, error) { +func FromSidecarImages(tro *objects.TaskRunObjectV1) ([]common.ProvenanceMaterial, error) { mats := []common.ProvenanceMaterial{} for _, image := range tro.GetSidecarImages() { m, err := fromImageID(image) @@ -158,11 +163,11 @@ func fromImageID(imageID string) (common.ProvenanceMaterial, error) { } // FromTaskResourcesToMaterials gets materials from task resources. 
-func FromTaskResources(ctx context.Context, tro *objects.TaskRunObject) []common.ProvenanceMaterial { +func FromTaskResources(ctx context.Context, tr *v1beta1.TaskRun) []common.ProvenanceMaterial { //nolint:staticcheck mats := []common.ProvenanceMaterial{} - if tro.Spec.Resources != nil { //nolint:all //incompatible with pipelines v0.45 + if tr.Spec.Resources != nil { //nolint:all //incompatible with pipelines v0.45 // check for a Git PipelineResource - for _, input := range tro.Spec.Resources.Inputs { //nolint:all //incompatible with pipelines v0.45 + for _, input := range tr.Spec.Resources.Inputs { //nolint:all //incompatible with pipelines v0.45 if input.ResourceSpec == nil || input.ResourceSpec.Type != backport.PipelineResourceTypeGit { //nolint:all //incompatible with pipelines v0.45 continue } @@ -171,7 +176,7 @@ func FromTaskResources(ctx context.Context, tro *objects.TaskRunObject) []common Digest: common.DigestSet{}, } - for _, rr := range tro.Status.ResourcesResult { + for _, rr := range tr.Status.ResourcesResult { if rr.ResourceName != input.Name { continue } @@ -202,7 +207,7 @@ func FromTaskResources(ctx context.Context, tro *objects.TaskRunObject) []common // FromTaskParamsAndResults scans over the taskrun, taskspec params and taskrun results // and looks for unstructured type hinted names matching CHAINS-GIT_COMMIT and CHAINS-GIT_URL // to extract the commit and url value for input artifact materials. 
-func FromTaskParamsAndResults(ctx context.Context, tro *objects.TaskRunObject) []common.ProvenanceMaterial { +func FromTaskParamsAndResults(ctx context.Context, tro *objects.TaskRunObjectV1) []common.ProvenanceMaterial { var commit, url string // Scan for git params to use for materials if tro.Status.TaskSpec != nil { @@ -230,7 +235,7 @@ func FromTaskParamsAndResults(ctx context.Context, tro *objects.TaskRunObject) [ } } - for _, r := range tro.Status.TaskRunResults { + for _, r := range tro.Status.Results { if r.Name == attest.CommitParam { commit = r.Value.StringVal } @@ -257,7 +262,7 @@ func FromTaskParamsAndResults(ctx context.Context, tro *objects.TaskRunObject) [ } // FromPipelineParamsAndResults extracts type hinted params and results and adds the url and digest to materials. -func FromPipelineParamsAndResults(ctx context.Context, pro *objects.PipelineRunObject, slsaconfig *slsaconfig.SlsaConfig) []common.ProvenanceMaterial { +func FromPipelineParamsAndResults(ctx context.Context, pro *objects.PipelineRunObjectV1, slsaconfig *slsaconfig.SlsaConfig) []common.ProvenanceMaterial { mats := []common.ProvenanceMaterial{} sms := artifacts.RetrieveMaterialsFromStructuredResults(ctx, pro, artifacts.ArtifactsInputsResultName) mats = artifact.AppendMaterials(mats, sms...) 
@@ -308,8 +313,8 @@ func FromPipelineParamsAndResults(ctx context.Context, pro *objects.PipelineRunO } } - // search status.PipelineRunResults - for _, r := range pro.Status.PipelineResults { + // search status.Results + for _, r := range pro.Status.Results { if r.Name == attest.CommitParam { commit = r.Value.StringVal } diff --git a/pkg/chains/formats/slsa/internal/material/material_test.go b/pkg/chains/formats/slsa/internal/material/material_test.go index 2f86b45f37..9bd827aa8e 100644 --- a/pkg/chains/formats/slsa/internal/material/material_test.go +++ b/pkg/chains/formats/slsa/internal/material/material_test.go @@ -31,57 +31,56 @@ import ( "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/internal/objectloader" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" logtesting "knative.dev/pkg/logging/testing" - "sigs.k8s.io/yaml" ) const digest = "sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b7" -func createPro(path string) *objects.PipelineRunObject { +func createPro(path string) *objects.PipelineRunObjectV1 { var err error pr, err := objectloader.PipelineRunFromFile(path) if err != nil { panic(err) } - tr1, err := objectloader.TaskRunFromFile("../../testdata/taskrun1.json") + tr1, err := objectloader.TaskRunFromFile("../../testdata/pipeline-v1/taskrun1.json") if err != nil { panic(err) } - tr2, err := objectloader.TaskRunFromFile("../../testdata/taskrun2.json") + tr2, err := objectloader.TaskRunFromFile("../../testdata/pipeline-v1/taskrun2.json") if err != nil { panic(err) } - p := objects.NewPipelineRunObject(pr) + p := objects.NewPipelineRunObjectV1(pr) p.AppendTaskRun(tr1) p.AppendTaskRun(tr2) return p } -func TestMaterialsWithTaskRunResults(t *testing.T) { - // make sure 
this works with Git resources - taskrun := `apiVersion: tekton.dev/v1beta1 -kind: TaskRun -spec: - taskSpec: - resources: - inputs: - - name: repo - type: git -status: - taskResults: - - name: CHAINS-GIT_COMMIT - value: 50c56a48cfb3a5a80fa36ed91c739bdac8381cbe - - name: CHAINS-GIT_URL - value: https://github.com/GoogleContainerTools/distroless` - - var taskRun *v1beta1.TaskRun - if err := yaml.Unmarshal([]byte(taskrun), &taskRun); err != nil { - t.Fatal(err) +func TestMaterialsWithResults(t *testing.T) { + taskRun := &v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Results: []v1.TaskRunResult{ + { + Name: "CHAINS-GIT_COMMIT", + Value: v1.ParamValue{ + StringVal: "50c56a48cfb3a5a80fa36ed91c739bdac8381cbe", + }, + }, + { + Name: "CHAINS-GIT_URL", + Value: v1.ParamValue{ + StringVal: "https://github.com/GoogleContainerTools/distroless", + }, + }, + }, + }, + }, } - want := []common.ProvenanceMaterial{ { URI: artifacts.GitSchemePrefix + "https://github.com/GoogleContainerTools/distroless.git", @@ -92,7 +91,7 @@ status: } ctx := logtesting.TestContextWithLogger(t) - got, err := TaskMaterials(ctx, objects.NewTaskRunObject(taskRun)) + got, err := TaskMaterials(ctx, objects.NewTaskRunObjectV1(taskRun)) if err != nil { t.Fatalf("Did not expect an error but got %v", err) } @@ -103,232 +102,252 @@ status: func TestTaskMaterials(t *testing.T) { tests := []struct { - name string - taskRun *v1beta1.TaskRun - want []common.ProvenanceMaterial - }{{ - name: "materials from pipeline resources", - taskRun: &v1beta1.TaskRun{ - Spec: v1beta1.TaskRunSpec{ - Resources: &v1beta1.TaskRunResources{ - Inputs: []v1beta1.TaskResourceBinding{ - { - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ - Name: "nil-resource-spec", - }, - }, { - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ - Name: "repo", - ResourceSpec: &v1alpha1.PipelineResourceSpec{ - Params: []v1alpha1.ResourceParam{ - {Name: "url", Value: 
"https://github.com/GoogleContainerTools/distroless"}, - {Name: "revision", Value: "my-revision"}, + name string + obj objects.TektonObject + want []common.ProvenanceMaterial + }{ + { + name: "materials from pipeline resources", + obj: objects.NewTaskRunObjectV1Beta1(&v1beta1.TaskRun{ //nolint:staticcheck + Spec: v1beta1.TaskRunSpec{ + Resources: &v1beta1.TaskRunResources{ //nolint:staticcheck + Inputs: []v1beta1.TaskResourceBinding{ //nolint:staticcheck + { + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ //nolint:staticcheck + Name: "nil-resource-spec", + }, + }, { + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ //nolint:staticcheck + Name: "repo", + ResourceSpec: &v1alpha1.PipelineResourceSpec{ //nolint:staticcheck + Params: []v1alpha1.ResourceParam{ //nolint:staticcheck + {Name: "url", Value: "https://github.com/GoogleContainerTools/distroless"}, + {Name: "revision", Value: "my-revision"}, + }, + Type: backport.PipelineResourceTypeGit, }, - Type: backport.PipelineResourceTypeGit, }, }, }, }, }, - }, - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskRunResults: []v1beta1.TaskRunResult{ - { - Name: "img1_input" + "-" + artifacts.ArtifactsInputsResultName, - Value: *v1beta1.NewObject(map[string]string{ - "uri": "gcr.io/foo/bar", - "digest": digest, - }), + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + TaskRunResults: []v1beta1.TaskRunResult{ + { + Name: "img1_input" + "-" + artifacts.ArtifactsInputsResultName, + Value: *v1beta1.NewObject(map[string]string{ + "uri": "gcr.io/foo/bar", + "digest": digest, + }), + }, }, - }, - ResourcesResult: []v1beta1.PipelineResourceResult{ - { - ResourceName: "repo", - Key: "commit", - Value: "50c56a48cfb3a5a80fa36ed91c739bdac8381cbe", - }, { - ResourceName: "repo", - Key: "url", - Value: "https://github.com/GoogleContainerTools/distroless", + ResourcesResult: []v1beta1.PipelineResourceResult{ + { + ResourceName: "repo", + Key: 
"commit", + Value: "50c56a48cfb3a5a80fa36ed91c739bdac8381cbe", + }, { + ResourceName: "repo", + Key: "url", + Value: "https://github.com/GoogleContainerTools/distroless", + }, }, }, }, - }, - }, - want: []common.ProvenanceMaterial{ - { - URI: "gcr.io/foo/bar", - Digest: common.DigestSet{ - "sha256": strings.TrimPrefix(digest, "sha256:"), + }), + want: []common.ProvenanceMaterial{ + { + URI: "gcr.io/foo/bar", + Digest: common.DigestSet{ + "sha256": strings.TrimPrefix(digest, "sha256:"), + }, }, - }, - { - URI: artifacts.GitSchemePrefix + "https://github.com/GoogleContainerTools/distroless.git@my-revision", - Digest: common.DigestSet{ - "sha1": "50c56a48cfb3a5a80fa36ed91c739bdac8381cbe", + { + URI: artifacts.GitSchemePrefix + "https://github.com/GoogleContainerTools/distroless.git@my-revision", + Digest: common.DigestSet{ + "sha1": "50c56a48cfb3a5a80fa36ed91c739bdac8381cbe", + }, }, }, }, - }, { - name: "materials from git results in task run spec", - taskRun: &v1beta1.TaskRun{ - Spec: v1beta1.TaskRunSpec{ - Params: []v1beta1.Param{{ - Name: "CHAINS-GIT_COMMIT", - Value: *v1beta1.NewStructuredValues("my-commit"), - }, { - Name: "CHAINS-GIT_URL", - Value: *v1beta1.NewStructuredValues("github.com/something"), - }}, - }, - }, - want: []common.ProvenanceMaterial{ - { - URI: artifacts.GitSchemePrefix + "github.com/something.git", - Digest: common.DigestSet{ - "sha1": "my-commit", + { + name: "materials from git results in task run spec", + obj: objects.NewTaskRunObjectV1(&v1.TaskRun{ + Spec: v1.TaskRunSpec{ + Params: []v1.Param{{ + Name: "CHAINS-GIT_COMMIT", + Value: *v1.NewStructuredValues("my-commit"), + }, { + Name: "CHAINS-GIT_URL", + Value: *v1.NewStructuredValues("github.com/something"), + }}, }, - }, - }, - }, { - name: "materials from git results in task spec", - taskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskSpec: &v1beta1.TaskSpec{ - Params: []v1beta1.ParamSpec{{ - Name: "CHAINS-GIT_COMMIT", 
- Default: &v1beta1.ParamValue{ - StringVal: "my-commit", - }, - }, { - Name: "CHAINS-GIT_URL", - Default: &v1beta1.ParamValue{ - StringVal: "github.com/something", - }, - }}, + }), + want: []common.ProvenanceMaterial{ + { + URI: artifacts.GitSchemePrefix + "github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", }, }, }, }, - want: []common.ProvenanceMaterial{ - { - URI: artifacts.GitSchemePrefix + "github.com/something.git", - Digest: common.DigestSet{ - "sha1": "my-commit", - }, - }, - }, - }, { - name: "materials from git results in task spec and taskrun spec", - taskRun: &v1beta1.TaskRun{ - Spec: v1beta1.TaskRunSpec{ - Params: []v1beta1.Param{{ - Name: "CHAINS-GIT_URL", - Value: v1beta1.ParamValue{ - StringVal: "github.com/something", + { + name: "materials from git results in task spec", + obj: objects.NewTaskRunObjectV1(&v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + TaskSpec: &v1.TaskSpec{ + Params: []v1.ParamSpec{{ + Name: "CHAINS-GIT_COMMIT", + Default: &v1.ParamValue{ + StringVal: "my-commit", + }, + }, { + Name: "CHAINS-GIT_URL", + Default: &v1.ParamValue{ + StringVal: "github.com/something", + }, + }}, + }, }, - }}, - }, - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - TaskSpec: &v1beta1.TaskSpec{ - Params: []v1beta1.ParamSpec{{ - Name: "CHAINS-GIT_URL", - }, { - Name: "CHAINS-GIT_COMMIT", - Default: &v1beta1.ParamValue{ - StringVal: "my-commit", - }, - }}, + }, + }), + want: []common.ProvenanceMaterial{ + { + URI: artifacts.GitSchemePrefix + "github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", }, }, }, }, - want: []common.ProvenanceMaterial{{ - URI: "git+github.com/something.git", - Digest: common.DigestSet{ - "sha1": "my-commit", - }, - }}, - }, { - name: "materials from step images", - taskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - Steps: 
[]v1beta1.StepState{{ - Name: "git-source-repo-jwqcl", - ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", - }, { - Name: "git-source-repo-repeat-again-jwqcl", - ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", - }, { - Name: "build", - ImageID: "gcr.io/cloud-marketplace-containers/google/bazel@sha256:010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + { + name: "materials from git results in task spec and taskrun spec", + obj: objects.NewTaskRunObjectV1(&v1.TaskRun{ + Spec: v1.TaskRunSpec{ + Params: []v1.Param{{ + Name: "CHAINS-GIT_URL", + Value: v1.ParamValue{ + StringVal: "github.com/something", + }, }}, }, - }, - }, - want: []common.ProvenanceMaterial{ - { - URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init", - Digest: common.DigestSet{ - "sha256": "b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + TaskSpec: &v1.TaskSpec{ + Params: []v1.ParamSpec{{ + Name: "CHAINS-GIT_URL", + }, { + Name: "CHAINS-GIT_COMMIT", + Default: &v1.ParamValue{ + StringVal: "my-commit", + }, + }}, + }, + }, }, - }, - { - URI: artifacts.OCIScheme + "gcr.io/cloud-marketplace-containers/google/bazel", + }), + want: []common.ProvenanceMaterial{{ + URI: "git+github.com/something.git", Digest: common.DigestSet{ - "sha256": "010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + "sha1": "my-commit", }, - }, + }}, }, - }, { - name: "materials from step and sidecar images", - taskRun: &v1beta1.TaskRun{ - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - Steps: []v1beta1.StepState{{ - Name: "git-source-repo-jwqcl", - ImageID: 
"gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", - }, { - Name: "git-source-repo-repeat-again-jwqcl", - ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", - }, { - Name: "build", - ImageID: "gcr.io/cloud-marketplace-containers/google/bazel@sha256:010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", - }}, - Sidecars: []v1beta1.SidecarState{{ - Name: "sidecar-jwqcl", - ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/sidecar-git-init@sha256:a1234f6e7a69617db57b685893256f978436277094c21d43b153994acd8a09567", - }}, + { + name: "materials from step images", + obj: objects.NewTaskRunObjectV1(&v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Steps: []v1.StepState{{ + Name: "git-source-repo-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, { + Name: "git-source-repo-repeat-again-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, { + Name: "build", + ImageID: "gcr.io/cloud-marketplace-containers/google/bazel@sha256:010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }}, + }, + }, + }), + want: []common.ProvenanceMaterial{ + { + URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init", + Digest: common.DigestSet{ + "sha256": "b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, + }, + { + URI: artifacts.OCIScheme + "gcr.io/cloud-marketplace-containers/google/bazel", + Digest: common.DigestSet{ + "sha256": "010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }, }, }, }, - want: []common.ProvenanceMaterial{ - { 
- URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init", - Digest: common.DigestSet{ - "sha256": "b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", - }, - }, { - URI: artifacts.OCIScheme + "gcr.io/cloud-marketplace-containers/google/bazel", - Digest: common.DigestSet{ - "sha256": "010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + { + name: "materials from step and sidecar images", + obj: objects.NewTaskRunObjectV1(&v1.TaskRun{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Steps: []v1.StepState{{ + Name: "git-source-repo-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, { + Name: "git-source-repo-repeat-again-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, { + Name: "build", + ImageID: "gcr.io/cloud-marketplace-containers/google/bazel@sha256:010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }}, + Sidecars: []v1.SidecarState{{ + Name: "sidecar-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/sidecar-git-init@sha256:a1234f6e7a69617db57b685893256f978436277094c21d43b153994acd8a09567", + }}, + }, }, - }, { - URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/sidecar-git-init", - Digest: common.DigestSet{ - "sha256": "a1234f6e7a69617db57b685893256f978436277094c21d43b153994acd8a09567", + }), + want: []common.ProvenanceMaterial{ + { + URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init", + Digest: common.DigestSet{ + "sha256": "b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, + }, { + URI: artifacts.OCIScheme + "gcr.io/cloud-marketplace-containers/google/bazel", + Digest: common.DigestSet{ + "sha256": 
"010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }, + }, { + URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/sidecar-git-init", + Digest: common.DigestSet{ + "sha256": "a1234f6e7a69617db57b685893256f978436277094c21d43b153994acd8a09567", + }, }, }, }, - }} + } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - mat, err := TaskMaterials(ctx, objects.NewTaskRunObject(tc.taskRun)) + // convert tekton object to taskrun objet + var input *objects.TaskRunObjectV1 + var err error + if obj, ok := tc.obj.(*objects.TaskRunObjectV1); ok { + input = obj + } + + if trV1Beta1, ok := tc.obj.GetObject().(*v1beta1.TaskRun); ok { //nolint:staticcheck + trV1 := &v1.TaskRun{} + if err := trV1Beta1.ConvertTo(ctx, trV1); err == nil { + input = objects.NewTaskRunObjectV1(trV1) + } + } + mat, err := TaskMaterials(ctx, input) if err != nil { t.Fatalf("Did not expect an error but got %v", err) } @@ -360,7 +379,7 @@ func TestPipelineMaterials(t *testing.T) { {URI: artifacts.GitSchemePrefix + "https://git.test.com.git", Digest: common.DigestSet{"sha1": "abcd"}}, } ctx := logtesting.TestContextWithLogger(t) - got, err := PipelineMaterials(ctx, createPro("../../testdata/pipelinerun1.json"), &slsaconfig.SlsaConfig{DeepInspectionEnabled: false}) + got, err := PipelineMaterials(ctx, createPro("../../testdata/pipeline-v1/pipelinerun1.json"), &slsaconfig.SlsaConfig{DeepInspectionEnabled: false}) if err != nil { t.Error(err) } @@ -394,7 +413,7 @@ func TestStructuredResultPipelineMaterials(t *testing.T) { }, } ctx := logtesting.TestContextWithLogger(t) - got, err := PipelineMaterials(ctx, createPro("../../testdata/pipelinerun_structured_results.json"), &slsaconfig.SlsaConfig{DeepInspectionEnabled: false}) + got, err := PipelineMaterials(ctx, createPro("../../testdata/pipeline-v1/pipelinerun_structured_results.json"), &slsaconfig.SlsaConfig{DeepInspectionEnabled: false}) if err != nil { 
t.Errorf("error while extracting materials: %v", err) } @@ -443,20 +462,20 @@ func TestFromImageID(t *testing.T) { func TestFromPipelineParamsAndResults(t *testing.T) { tests := []struct { name string - pipelineRunObject *objects.PipelineRunObject + pipelineRunObject *objects.PipelineRunObjectV1 enableDeepInspection bool want []common.ProvenanceMaterial }{{ name: "from results", - pipelineRunObject: objects.NewPipelineRunObject(&v1beta1.PipelineRun{ - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - PipelineResults: []v1beta1.PipelineRunResult{{ + pipelineRunObject: objects.NewPipelineRunObjectV1(&v1.PipelineRun{ + Status: v1.PipelineRunStatus{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + Results: []v1.PipelineRunResult{{ Name: "CHAINS-GIT_COMMIT", - Value: *v1beta1.NewStructuredValues("my-commit"), + Value: *v1.NewStructuredValues("my-commit"), }, { Name: "CHAINS-GIT_URL", - Value: *v1beta1.NewStructuredValues("github.com/something"), + Value: *v1.NewStructuredValues("github.com/something"), }}, }, }, @@ -469,18 +488,18 @@ func TestFromPipelineParamsAndResults(t *testing.T) { }}, }, { name: "from pipelinespec", - pipelineRunObject: objects.NewPipelineRunObject(&v1beta1.PipelineRun{ - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - PipelineSpec: &v1beta1.PipelineSpec{ - Params: []v1beta1.ParamSpec{{ + pipelineRunObject: objects.NewPipelineRunObjectV1(&v1.PipelineRun{ + Status: v1.PipelineRunStatus{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + PipelineSpec: &v1.PipelineSpec{ + Params: []v1.ParamSpec{{ Name: "CHAINS-GIT_COMMIT", - Default: &v1beta1.ParamValue{ + Default: &v1.ParamValue{ StringVal: "my-commit", }, }, { Name: "CHAINS-GIT_URL", - Default: &v1beta1.ParamValue{ + Default: &v1.ParamValue{ StringVal: "github.com/something", }, }}, @@ -496,16 +515,16 @@ func TestFromPipelineParamsAndResults(t *testing.T) { }}, }, { name: "from 
pipelineRunSpec", - pipelineRunObject: objects.NewPipelineRunObject(&v1beta1.PipelineRun{ - Spec: v1beta1.PipelineRunSpec{ - Params: []v1beta1.Param{{ + pipelineRunObject: objects.NewPipelineRunObjectV1(&v1.PipelineRun{ + Spec: v1.PipelineRunSpec{ + Params: []v1.Param{{ Name: "CHAINS-GIT_COMMIT", - Value: v1beta1.ParamValue{ + Value: v1.ParamValue{ StringVal: "my-commit", }, }, { Name: "CHAINS-GIT_URL", - Value: v1beta1.ParamValue{ + Value: v1.ParamValue{ StringVal: "github.com/something", }, }}, @@ -519,25 +538,25 @@ func TestFromPipelineParamsAndResults(t *testing.T) { }}, }, { name: "from completeChain", - pipelineRunObject: objects.NewPipelineRunObject(&v1beta1.PipelineRun{ - Spec: v1beta1.PipelineRunSpec{ - Params: []v1beta1.Param{{ + pipelineRunObject: objects.NewPipelineRunObjectV1(&v1.PipelineRun{ + Spec: v1.PipelineRunSpec{ + Params: []v1.Param{{ Name: "CHAINS-GIT_URL", - Value: v1beta1.ParamValue{ + Value: v1.ParamValue{ StringVal: "github.com/something", }, }}, }, - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - PipelineSpec: &v1beta1.PipelineSpec{ - Params: []v1beta1.ParamSpec{{ + Status: v1.PipelineRunStatus{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + PipelineSpec: &v1.PipelineSpec{ + Params: []v1.ParamSpec{{ Name: "CHAINS-GIT_URL", }}, }, - PipelineResults: []v1beta1.PipelineRunResult{{ + Results: []v1.PipelineRunResult{{ Name: "CHAINS-GIT_COMMIT", - Value: *v1beta1.NewStructuredValues("my-commit"), + Value: *v1.NewStructuredValues("my-commit"), }}, }, }, @@ -548,24 +567,25 @@ func TestFromPipelineParamsAndResults(t *testing.T) { "sha1": "my-commit", }, }}, - }, { - name: "deep inspection: pipelinerun param and task result", - pipelineRunObject: createProWithPipelineParamAndTaskResult(), - enableDeepInspection: true, - want: []common.ProvenanceMaterial{ - { - URI: "git+github.com/pipelinerun-param.git", - Digest: common.DigestSet{ - "sha1": "115734d92807a80158b4b7af605d768c647fdb3d", - }, 
- }, { - URI: "github.com/childtask-result", - Digest: common.DigestSet{ - "sha1": "225734d92807a80158b4b7af605d768c647fdb3d", + }, + { + name: "deep inspection: pipelinerun param and task result", + pipelineRunObject: createProWithPipelineParamAndTaskResult(), + enableDeepInspection: true, + want: []common.ProvenanceMaterial{ + { + URI: "git+github.com/pipelinerun-param.git", + Digest: common.DigestSet{ + "sha1": "115734d92807a80158b4b7af605d768c647fdb3d", + }, + }, { + URI: "github.com/childtask-result", + Digest: common.DigestSet{ + "sha1": "225734d92807a80158b4b7af605d768c647fdb3d", + }, }, }, }, - }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -579,19 +599,19 @@ func TestFromPipelineParamsAndResults(t *testing.T) { } //nolint:all -func createProWithPipelineParamAndTaskResult() *objects.PipelineRunObject { - pro := objects.NewPipelineRunObject(&v1beta1.PipelineRun{ - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - PipelineSpec: &v1beta1.PipelineSpec{ - Params: []v1beta1.ParamSpec{{ +func createProWithPipelineParamAndTaskResult() *objects.PipelineRunObjectV1 { + pro := objects.NewPipelineRunObjectV1(&v1.PipelineRun{ + Status: v1.PipelineRunStatus{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + PipelineSpec: &v1.PipelineSpec{ + Params: []v1.ParamSpec{{ Name: "CHAINS-GIT_COMMIT", - Default: &v1beta1.ParamValue{ + Default: &v1.ParamValue{ StringVal: "115734d92807a80158b4b7af605d768c647fdb3d", }, }, { Name: "CHAINS-GIT_URL", - Default: &v1beta1.ParamValue{ + Default: &v1.ParamValue{ StringVal: "github.com/pipelinerun-param", }, }}, @@ -601,15 +621,15 @@ func createProWithPipelineParamAndTaskResult() *objects.PipelineRunObject { }) pipelineTaskName := "my-clone-task" - tr := &v1beta1.TaskRun{ + tr := &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{objects.PipelineTaskLabel: pipelineTaskName}}, - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: 
v1beta1.TaskRunStatusFields{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ CompletionTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, - TaskRunResults: []v1beta1.TaskRunResult{ + Results: []v1.TaskRunResult{ { Name: "ARTIFACT_INPUTS", - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "github.com/childtask-result", "digest": "sha1:225734d92807a80158b4b7af605d768c647fdb3d", })}, @@ -619,6 +639,6 @@ func createProWithPipelineParamAndTaskResult() *objects.PipelineRunObject { } pro.AppendTaskRun(tr) - pro.Status.PipelineSpec.Tasks = []v1beta1.PipelineTask{{Name: pipelineTaskName}} + pro.Status.PipelineSpec.Tasks = []v1.PipelineTask{{Name: pipelineTaskName}} return pro } diff --git a/pkg/chains/formats/slsa/internal/material/v1beta1/material.go b/pkg/chains/formats/slsa/internal/material/v1beta1/material.go new file mode 100644 index 0000000000..3bcec5480b --- /dev/null +++ b/pkg/chains/formats/slsa/internal/material/v1beta1/material.go @@ -0,0 +1,328 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "context" + "fmt" + "strings" + + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + "github.com/tektoncd/chains/internal/backport" + "github.com/tektoncd/chains/pkg/artifacts" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/attest" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/artifact" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" + "github.com/tektoncd/chains/pkg/chains/objects" + "knative.dev/pkg/logging" +) + +const ( + uriSeparator = "@" + digestSeparator = ":" +) + +// TaskMaterials constructs `predicate.materials` section by collecting all the artifacts that influence a taskrun such as source code repo and step&sidecar base images. +func TaskMaterials(ctx context.Context, tro *objects.TaskRunObjectV1Beta1) ([]common.ProvenanceMaterial, error) { + var mats []common.ProvenanceMaterial + + // add step images + stepMaterials, err := FromStepImages(tro) + if err != nil { + return nil, err + } + mats = artifact.AppendMaterials(mats, stepMaterials...) + + // add sidecar images + sidecarMaterials, err := FromSidecarImages(tro) + if err != nil { + return nil, err + } + mats = artifact.AppendMaterials(mats, sidecarMaterials...) + + mats = artifact.AppendMaterials(mats, FromTaskParamsAndResults(ctx, tro)...) + + // add task resources + mats = artifact.AppendMaterials(mats, FromTaskResources(ctx, tro)...) 
+ + return mats, nil +} + +func PipelineMaterials(ctx context.Context, pro *objects.PipelineRunObjectV1Beta1, slsaconfig *slsaconfig.SlsaConfig) ([]common.ProvenanceMaterial, error) { + logger := logging.FromContext(ctx) + var mats []common.ProvenanceMaterial + if p := pro.Status.Provenance; p != nil && p.RefSource != nil { + m := common.ProvenanceMaterial{ + URI: p.RefSource.URI, + Digest: p.RefSource.Digest, + } + mats = artifact.AppendMaterials(mats, m) + } + pSpec := pro.Status.PipelineSpec + if pSpec != nil { + pipelineTasks := append(pSpec.Tasks, pSpec.Finally...) + for _, t := range pipelineTasks { + tr := pro.GetTaskRunFromTask(t.Name) + // Ignore Tasks that did not execute during the PipelineRun. + if tr == nil || tr.Status.CompletionTime == nil { + logger.Infof("taskrun status not found for task %s", t.Name) + continue + } + + stepMaterials, err := FromStepImages(tr) + if err != nil { + return mats, err + } + mats = artifact.AppendMaterials(mats, stepMaterials...) + + // add sidecar images + sidecarMaterials, err := FromSidecarImages(tr) + if err != nil { + return nil, err + } + mats = artifact.AppendMaterials(mats, sidecarMaterials...) + + // add remote task configsource information in materials + if tr.Status.Provenance != nil && tr.Status.Provenance.RefSource != nil { + m := common.ProvenanceMaterial{ + URI: tr.Status.Provenance.RefSource.URI, + Digest: tr.Status.Provenance.RefSource.Digest, + } + mats = artifact.AppendMaterials(mats, m) + } + } + } + + mats = artifact.AppendMaterials(mats, FromPipelineParamsAndResults(ctx, pro, slsaconfig)...) 
+ + return mats, nil +} + +// FromStepImages gets predicate.materials from step images +func FromStepImages(tro *objects.TaskRunObjectV1Beta1) ([]common.ProvenanceMaterial, error) { + mats := []common.ProvenanceMaterial{} + for _, image := range tro.GetStepImages() { + m, err := fromImageID(image) + if err != nil { + return nil, err + } + mats = artifact.AppendMaterials(mats, m) + } + return mats, nil +} + +// FromSidecarImages gets predicate.materials from sidecar images +func FromSidecarImages(tro *objects.TaskRunObjectV1Beta1) ([]common.ProvenanceMaterial, error) { + mats := []common.ProvenanceMaterial{} + for _, image := range tro.GetSidecarImages() { + m, err := fromImageID(image) + if err != nil { + return nil, err + } + mats = artifact.AppendMaterials(mats, m) + } + return mats, nil +} + +// fromImageID converts an imageId with format @sha256: and generates a provenance materials. +func fromImageID(imageID string) (common.ProvenanceMaterial, error) { + uriDigest := strings.Split(imageID, uriSeparator) + if len(uriDigest) != 2 { + return common.ProvenanceMaterial{}, fmt.Errorf("expected imageID %s to be separable by @", imageID) + } + digest := strings.Split(uriDigest[1], digestSeparator) + if len(digest) != 2 { + return common.ProvenanceMaterial{}, fmt.Errorf("expected imageID %s to be separable by @ and :", imageID) + } + uri := strings.TrimPrefix(uriDigest[0], "docker-pullable://") + m := common.ProvenanceMaterial{ + Digest: common.DigestSet{}, + } + m.URI = artifacts.OCIScheme + uri + m.Digest[digest[0]] = digest[1] + return m, nil +} + +// FromTaskResourcesToMaterials gets materials from task resources. 
+func FromTaskResources(ctx context.Context, tro *objects.TaskRunObjectV1Beta1) []common.ProvenanceMaterial { + mats := []common.ProvenanceMaterial{} + if tro.Spec.Resources != nil { //nolint:all //incompatible with pipelines v0.45 + // check for a Git PipelineResource + for _, input := range tro.Spec.Resources.Inputs { //nolint:all //incompatible with pipelines v0.45 + if input.ResourceSpec == nil || input.ResourceSpec.Type != backport.PipelineResourceTypeGit { //nolint:all //incompatible with pipelines v0.45 + continue + } + + m := common.ProvenanceMaterial{ + Digest: common.DigestSet{}, + } + + for _, rr := range tro.Status.ResourcesResult { + if rr.ResourceName != input.Name { + continue + } + if rr.Key == "url" { + m.URI = attest.SPDXGit(rr.Value, "") + } else if rr.Key == "commit" { + m.Digest["sha1"] = rr.Value + } + } + + var url string + var revision string + for _, param := range input.ResourceSpec.Params { + if param.Name == "url" { + url = param.Value + } + if param.Name == "revision" { + revision = param.Value + } + } + m.URI = attest.SPDXGit(url, revision) + mats = artifact.AppendMaterials(mats, m) + } + } + return mats +} + +// FromTaskParamsAndResults scans over the taskrun, taskspec params and taskrun results +// and looks for unstructured type hinted names matching CHAINS-GIT_COMMIT and CHAINS-GIT_URL +// to extract the commit and url value for input artifact materials. 
+func FromTaskParamsAndResults(ctx context.Context, tro *objects.TaskRunObjectV1Beta1) []common.ProvenanceMaterial { + var commit, url string + // Scan for git params to use for materials + if tro.Status.TaskSpec != nil { + for _, p := range tro.Status.TaskSpec.Params { + if p.Default == nil { + continue + } + if p.Name == attest.CommitParam { + commit = p.Default.StringVal + continue + } + if p.Name == attest.URLParam { + url = p.Default.StringVal + } + } + } + + for _, p := range tro.Spec.Params { + if p.Name == attest.CommitParam { + commit = p.Value.StringVal + continue + } + if p.Name == attest.URLParam { + url = p.Value.StringVal + } + } + + for _, r := range tro.Status.TaskRunResults { + if r.Name == attest.CommitParam { + commit = r.Value.StringVal + } + if r.Name == attest.URLParam { + url = r.Value.StringVal + } + } + + url = attest.SPDXGit(url, "") + + var mats []common.ProvenanceMaterial + if commit != "" && url != "" { + mats = artifact.AppendMaterials(mats, common.ProvenanceMaterial{ + URI: url, + // TODO. this could be sha256 as well. Fix in another PR. + Digest: map[string]string{"sha1": commit}, + }) + } + + sms := artifacts.RetrieveMaterialsFromStructuredResults(ctx, tro, artifacts.ArtifactsInputsResultName) + mats = artifact.AppendMaterials(mats, sms...) + + return mats +} + +// FromPipelineParamsAndResults extracts type hinted params and results and adds the url and digest to materials. +func FromPipelineParamsAndResults(ctx context.Context, pro *objects.PipelineRunObjectV1Beta1, slsaconfig *slsaconfig.SlsaConfig) []common.ProvenanceMaterial { + mats := []common.ProvenanceMaterial{} + sms := artifacts.RetrieveMaterialsFromStructuredResults(ctx, pro, artifacts.ArtifactsInputsResultName) + mats = artifact.AppendMaterials(mats, sms...) 
+ + var commit, url string + + pSpec := pro.Status.PipelineSpec + if pSpec != nil { + // search type hinting param/results from each individual taskruns + if slsaconfig.DeepInspectionEnabled { + logger := logging.FromContext(ctx) + pipelineTasks := append(pSpec.Tasks, pSpec.Finally...) + for _, t := range pipelineTasks { + tr := pro.GetTaskRunFromTask(t.Name) + // Ignore Tasks that did not execute during the PipelineRun. + if tr == nil || tr.Status.CompletionTime == nil { + logger.Infof("taskrun is not found or not completed for the task %s", t.Name) + continue + } + materialsFromTasks := FromTaskParamsAndResults(ctx, tr) + mats = artifact.AppendMaterials(mats, materialsFromTasks...) + } + } + + // search status.PipelineSpec.params + for _, p := range pSpec.Params { + if p.Default == nil { + continue + } + if p.Name == attest.CommitParam { + commit = p.Default.StringVal + continue + } + if p.Name == attest.URLParam { + url = p.Default.StringVal + } + } + } + + // search pipelineRunSpec.params + for _, p := range pro.Spec.Params { + if p.Name == attest.CommitParam { + commit = p.Value.StringVal + continue + } + if p.Name == attest.URLParam { + url = p.Value.StringVal + } + } + + // search status.PipelineRunResults + for _, r := range pro.Status.PipelineResults { + if r.Name == attest.CommitParam { + commit = r.Value.StringVal + } + if r.Name == attest.URLParam { + url = r.Value.StringVal + } + } + if len(commit) > 0 && len(url) > 0 { + url = attest.SPDXGit(url, "") + mats = artifact.AppendMaterials(mats, common.ProvenanceMaterial{ + URI: url, + Digest: map[string]string{"sha1": commit}, + }) + } + return mats +} diff --git a/pkg/chains/formats/slsa/internal/material/v1beta1/material_test.go b/pkg/chains/formats/slsa/internal/material/v1beta1/material_test.go new file mode 100644 index 0000000000..f9a0dc4fb3 --- /dev/null +++ b/pkg/chains/formats/slsa/internal/material/v1beta1/material_test.go @@ -0,0 +1,624 @@ +/* +Copyright 2023 The Tekton Authors + +Licensed under 
the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + "github.com/tektoncd/chains/internal/backport" + "github.com/tektoncd/chains/pkg/artifacts" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/compare" + "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" + "github.com/tektoncd/chains/pkg/chains/objects" + "github.com/tektoncd/chains/pkg/internal/objectloader" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + logtesting "knative.dev/pkg/logging/testing" + "sigs.k8s.io/yaml" +) + +const digest = "sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b7" + +func createPro(path string) *objects.PipelineRunObjectV1Beta1 { + var err error + pr, err := objectloader.PipelineRunV1Beta1FromFile(path) + if err != nil { + panic(err) + } + tr1, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/pipeline-v1beta1/taskrun1.json") + if err != nil { + panic(err) + } + tr2, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/pipeline-v1beta1/taskrun2.json") + if err != nil { + panic(err) + } + p := objects.NewPipelineRunObjectV1Beta1(pr) + p.AppendTaskRun(tr1) + p.AppendTaskRun(tr2) + return p +} + +func TestMaterialsWithTaskRunResults(t *testing.T) { 
+ // make sure this works with Git resources + taskrun := `apiVersion: tekton.dev/v1beta1 +kind: TaskRun +spec: + taskSpec: + resources: + inputs: + - name: repo + type: git +status: + taskResults: + - name: CHAINS-GIT_COMMIT + value: 50c56a48cfb3a5a80fa36ed91c739bdac8381cbe + - name: CHAINS-GIT_URL + value: https://github.com/GoogleContainerTools/distroless` + + var taskRun *v1beta1.TaskRun //nolint:staticcheck + if err := yaml.Unmarshal([]byte(taskrun), &taskRun); err != nil { + t.Fatal(err) + } + + want := []common.ProvenanceMaterial{ + { + URI: artifacts.GitSchemePrefix + "https://github.com/GoogleContainerTools/distroless.git", + Digest: common.DigestSet{ + "sha1": "50c56a48cfb3a5a80fa36ed91c739bdac8381cbe", + }, + }, + } + + ctx := logtesting.TestContextWithLogger(t) + got, err := TaskMaterials(ctx, objects.NewTaskRunObjectV1Beta1(taskRun)) + if err != nil { + t.Fatalf("Did not expect an error but got %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("want %v got %v", want, got) + } +} + +func TestTaskMaterials(t *testing.T) { + tests := []struct { + name string + taskRun *v1beta1.TaskRun //nolint:staticcheck + want []common.ProvenanceMaterial + }{{ + name: "materials from pipeline resources", + taskRun: &v1beta1.TaskRun{ //nolint:staticcheck + Spec: v1beta1.TaskRunSpec{ + Resources: &v1beta1.TaskRunResources{ //nolint:staticcheck + Inputs: []v1beta1.TaskResourceBinding{ //nolint:staticcheck + { + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ //nolint:staticcheck + Name: "nil-resource-spec", + }, + }, { + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ //nolint:staticcheck + Name: "repo", + ResourceSpec: &v1alpha1.PipelineResourceSpec{ //nolint:staticcheck + Params: []v1alpha1.ResourceParam{ //nolint:staticcheck + {Name: "url", Value: "https://github.com/GoogleContainerTools/distroless"}, + {Name: "revision", Value: "my-revision"}, + }, + Type: backport.PipelineResourceTypeGit, + }, + }, + }, + }, + }, + }, + Status: 
v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + TaskRunResults: []v1beta1.TaskRunResult{ + { + Name: "img1_input" + "-" + artifacts.ArtifactsInputsResultName, + Value: *v1beta1.NewObject(map[string]string{ + "uri": "gcr.io/foo/bar", + "digest": digest, + }), + }, + }, + ResourcesResult: []v1beta1.PipelineResourceResult{ + { + ResourceName: "repo", + Key: "commit", + Value: "50c56a48cfb3a5a80fa36ed91c739bdac8381cbe", + }, { + ResourceName: "repo", + Key: "url", + Value: "https://github.com/GoogleContainerTools/distroless", + }, + }, + }, + }, + }, + want: []common.ProvenanceMaterial{ + { + URI: "gcr.io/foo/bar", + Digest: common.DigestSet{ + "sha256": strings.TrimPrefix(digest, "sha256:"), + }, + }, + { + URI: artifacts.GitSchemePrefix + "https://github.com/GoogleContainerTools/distroless.git@my-revision", + Digest: common.DigestSet{ + "sha1": "50c56a48cfb3a5a80fa36ed91c739bdac8381cbe", + }, + }, + }, + }, { + name: "materials from git results in task run spec", + taskRun: &v1beta1.TaskRun{ //nolint:staticcheck + Spec: v1beta1.TaskRunSpec{ + Params: []v1beta1.Param{{ + Name: "CHAINS-GIT_COMMIT", + Value: *v1beta1.NewStructuredValues("my-commit"), + }, { + Name: "CHAINS-GIT_URL", + Value: *v1beta1.NewStructuredValues("github.com/something"), + }}, + }, + }, + want: []common.ProvenanceMaterial{ + { + URI: artifacts.GitSchemePrefix + "github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", + }, + }, + }, + }, { + name: "materials from git results in task spec", + taskRun: &v1beta1.TaskRun{ //nolint:staticcheck + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + TaskSpec: &v1beta1.TaskSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "CHAINS-GIT_COMMIT", + Default: &v1beta1.ParamValue{ + StringVal: "my-commit", + }, + }, { + Name: "CHAINS-GIT_URL", + Default: &v1beta1.ParamValue{ + StringVal: "github.com/something", + }, + }}, + }, + }, + }, + }, + want: []common.ProvenanceMaterial{ + { + 
URI: artifacts.GitSchemePrefix + "github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", + }, + }, + }, + }, { + name: "materials from git results in task spec and taskrun spec", + taskRun: &v1beta1.TaskRun{ //nolint:staticcheck + Spec: v1beta1.TaskRunSpec{ + Params: []v1beta1.Param{{ + Name: "CHAINS-GIT_URL", + Value: v1beta1.ParamValue{ + StringVal: "github.com/something", + }, + }}, + }, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + TaskSpec: &v1beta1.TaskSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "CHAINS-GIT_URL", + }, { + Name: "CHAINS-GIT_COMMIT", + Default: &v1beta1.ParamValue{ + StringVal: "my-commit", + }, + }}, + }, + }, + }, + }, + want: []common.ProvenanceMaterial{{ + URI: "git+github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", + }, + }}, + }, { + name: "materials from step images", + taskRun: &v1beta1.TaskRun{ //nolint:staticcheck + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + Name: "git-source-repo-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, { + Name: "git-source-repo-repeat-again-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, { + Name: "build", + ImageID: "gcr.io/cloud-marketplace-containers/google/bazel@sha256:010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }}, + }, + }, + }, + want: []common.ProvenanceMaterial{ + { + URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init", + Digest: common.DigestSet{ + "sha256": "b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, + }, + { + URI: artifacts.OCIScheme + "gcr.io/cloud-marketplace-containers/google/bazel", + Digest: 
common.DigestSet{ + "sha256": "010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }, + }, + }, + }, { + name: "materials from step and sidecar images", + taskRun: &v1beta1.TaskRun{ //nolint:staticcheck + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + Steps: []v1beta1.StepState{{ + Name: "git-source-repo-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, { + Name: "git-source-repo-repeat-again-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init@sha256:b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, { + Name: "build", + ImageID: "gcr.io/cloud-marketplace-containers/google/bazel@sha256:010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }}, + Sidecars: []v1beta1.SidecarState{{ + Name: "sidecar-jwqcl", + ImageID: "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/sidecar-git-init@sha256:a1234f6e7a69617db57b685893256f978436277094c21d43b153994acd8a09567", + }}, + }, + }, + }, + want: []common.ProvenanceMaterial{ + { + URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init", + Digest: common.DigestSet{ + "sha256": "b963f6e7a69617db57b685893256f978436277094c21d43b153994acd8a01247", + }, + }, { + URI: artifacts.OCIScheme + "gcr.io/cloud-marketplace-containers/google/bazel", + Digest: common.DigestSet{ + "sha256": "010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }, + }, { + URI: artifacts.OCIScheme + "gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/sidecar-git-init", + Digest: common.DigestSet{ + "sha256": "a1234f6e7a69617db57b685893256f978436277094c21d43b153994acd8a09567", + }, + }, + }, + }} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + mat, err := TaskMaterials(ctx, 
objects.NewTaskRunObjectV1Beta1(tc.taskRun)) + if err != nil { + t.Fatalf("Did not expect an error but got %v", err) + } + if diff := cmp.Diff(tc.want, mat); diff != "" { + t.Errorf("Materials(): -want +got: %s", diff) + } + }) + } +} + +func TestPipelineMaterials(t *testing.T) { + expected := []common.ProvenanceMaterial{ + {URI: "github.com/test", Digest: common.DigestSet{"sha1": "28b123"}}, + { + URI: artifacts.OCIScheme + "gcr.io/test1/test1", + Digest: common.DigestSet{"sha256": "d4b63d3e24d6eef04a6dc0795cf8a73470688803d97c52cffa3c8d4efd3397b6"}, + }, + {URI: "github.com/catalog", Digest: common.DigestSet{"sha1": "x123"}}, + { + URI: artifacts.OCIScheme + "gcr.io/test2/test2", + Digest: common.DigestSet{"sha256": "4d6dd704ef58cb214dd826519929e92a978a57cdee43693006139c0080fd6fac"}, + }, + { + URI: artifacts.OCIScheme + "gcr.io/test3/test3", + Digest: common.DigestSet{"sha256": "f1a8b8549c179f41e27ff3db0fe1a1793e4b109da46586501a8343637b1d0478"}, + }, + {URI: "github.com/test", Digest: common.DigestSet{"sha1": "ab123"}}, + {URI: "abc", Digest: common.DigestSet{"sha256": "827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7"}}, + {URI: artifacts.GitSchemePrefix + "https://git.test.com.git", Digest: common.DigestSet{"sha1": "abcd"}}, + } + ctx := logtesting.TestContextWithLogger(t) + got, err := PipelineMaterials(ctx, createPro("../../../testdata/pipeline-v1beta1/pipelinerun1.json"), &slsaconfig.SlsaConfig{DeepInspectionEnabled: false}) + if err != nil { + t.Error(err) + } + if diff := cmp.Diff(expected, got, compare.MaterialsCompareOption()); diff != "" { + t.Errorf("Materials(): -want +got: %s", diff) + } +} + +func TestStructuredResultPipelineMaterials(t *testing.T) { + want := []common.ProvenanceMaterial{ + {URI: "github.com/test", Digest: common.DigestSet{"sha1": "28b123"}}, + { + URI: artifacts.OCIScheme + "gcr.io/test1/test1", + Digest: common.DigestSet{"sha256": "d4b63d3e24d6eef04a6dc0795cf8a73470688803d97c52cffa3c8d4efd3397b6"}, + }, + {URI: 
"github.com/catalog", Digest: common.DigestSet{"sha1": "x123"}}, + { + URI: artifacts.OCIScheme + "gcr.io/test2/test2", + Digest: common.DigestSet{"sha256": "4d6dd704ef58cb214dd826519929e92a978a57cdee43693006139c0080fd6fac"}, + }, + { + URI: artifacts.OCIScheme + "gcr.io/test3/test3", + Digest: common.DigestSet{"sha256": "f1a8b8549c179f41e27ff3db0fe1a1793e4b109da46586501a8343637b1d0478"}, + }, + {URI: "github.com/test", Digest: common.DigestSet{"sha1": "ab123"}}, + { + URI: "abcd", + Digest: common.DigestSet{ + "sha256": "827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7", + }, + }, + } + ctx := logtesting.TestContextWithLogger(t) + got, err := PipelineMaterials(ctx, createPro("../../../testdata/pipeline-v1beta1/pipelinerun_structured_results.json"), &slsaconfig.SlsaConfig{DeepInspectionEnabled: false}) + if err != nil { + t.Errorf("error while extracting materials: %v", err) + } + if diff := cmp.Diff(want, got, compare.MaterialsCompareOption()); diff != "" { + t.Errorf("materials(): -want +got: %s", diff) + } +} + +func TestFromImageID(t *testing.T) { + tests := []struct { + name string + imageID string + want common.ProvenanceMaterial + wantError error + }{{ + name: "proper ImageID", + imageID: "gcr.io/cloud-marketplace-containers/google/bazel@sha256:010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + want: common.ProvenanceMaterial{ + URI: artifacts.OCIScheme + "gcr.io/cloud-marketplace-containers/google/bazel", + Digest: common.DigestSet{ + "sha256": "010a1ecd1a8c3610f12039a25b823e3a17bd3e8ae455a53e340dcfdd37a49964", + }, + }, + }, { + name: "bad ImageID", + imageID: "badImageId", + want: common.ProvenanceMaterial{}, + wantError: fmt.Errorf("expected imageID badImageId to be separable by @"), + }} + for _, tc := range tests { + mat, err := fromImageID(tc.imageID) + if err != nil { + if err.Error() != tc.wantError.Error() { + t.Fatalf("Expected error %v but got %v", tc.wantError, err) + } + } + if tc.wantError == nil { + if diff 
:= cmp.Diff(tc.want, mat); diff != "" { + t.Errorf("materials(): -want +got: %s", diff) + } + } + } +} + +//nolint:all +func TestFromPipelineParamsAndResults(t *testing.T) { + tests := []struct { + name string + pipelineRunObjectV1Beta1 *objects.PipelineRunObjectV1Beta1 + enableDeepInspection bool + want []common.ProvenanceMaterial + }{{ + name: "from results", + pipelineRunObjectV1Beta1: objects.NewPipelineRunObjectV1Beta1(&v1beta1.PipelineRun{ + Status: v1beta1.PipelineRunStatus{ + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + PipelineResults: []v1beta1.PipelineRunResult{{ + Name: "CHAINS-GIT_COMMIT", + Value: *v1beta1.NewStructuredValues("my-commit"), + }, { + Name: "CHAINS-GIT_URL", + Value: *v1beta1.NewStructuredValues("github.com/something"), + }}, + }, + }, + }), + want: []common.ProvenanceMaterial{{ + URI: "git+github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", + }, + }}, + }, { + name: "from pipelinespec", + pipelineRunObjectV1Beta1: objects.NewPipelineRunObjectV1Beta1(&v1beta1.PipelineRun{ + Status: v1beta1.PipelineRunStatus{ + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + PipelineSpec: &v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "CHAINS-GIT_COMMIT", + Default: &v1beta1.ParamValue{ + StringVal: "my-commit", + }, + }, { + Name: "CHAINS-GIT_URL", + Default: &v1beta1.ParamValue{ + StringVal: "github.com/something", + }, + }}, + }, + }, + }, + }), + want: []common.ProvenanceMaterial{{ + URI: "git+github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", + }, + }}, + }, { + name: "from pipelineRunSpec", + pipelineRunObjectV1Beta1: objects.NewPipelineRunObjectV1Beta1(&v1beta1.PipelineRun{ + Spec: v1beta1.PipelineRunSpec{ + Params: []v1beta1.Param{{ + Name: "CHAINS-GIT_COMMIT", + Value: v1beta1.ParamValue{ + StringVal: "my-commit", + }, + }, { + Name: "CHAINS-GIT_URL", + Value: v1beta1.ParamValue{ + StringVal: "github.com/something", + }, + }}, + }, + }), + want: 
[]common.ProvenanceMaterial{{ + URI: "git+github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", + }, + }}, + }, { + name: "from completeChain", + pipelineRunObjectV1Beta1: objects.NewPipelineRunObjectV1Beta1(&v1beta1.PipelineRun{ + Spec: v1beta1.PipelineRunSpec{ + Params: []v1beta1.Param{{ + Name: "CHAINS-GIT_URL", + Value: v1beta1.ParamValue{ + StringVal: "github.com/something", + }, + }}, + }, + Status: v1beta1.PipelineRunStatus{ + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + PipelineSpec: &v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "CHAINS-GIT_URL", + }}, + }, + PipelineResults: []v1beta1.PipelineRunResult{{ + Name: "CHAINS-GIT_COMMIT", + Value: *v1beta1.NewStructuredValues("my-commit"), + }}, + }, + }, + }), + want: []common.ProvenanceMaterial{{ + URI: "git+github.com/something.git", + Digest: common.DigestSet{ + "sha1": "my-commit", + }, + }}, + }, { + name: "deep inspection: pipelinerun param and task result", + pipelineRunObjectV1Beta1: createProWithPipelineParamAndTaskResult(), + enableDeepInspection: true, + want: []common.ProvenanceMaterial{ + { + URI: "git+github.com/pipelinerun-param.git", + Digest: common.DigestSet{ + "sha1": "115734d92807a80158b4b7af605d768c647fdb3d", + }, + }, { + URI: "github.com/childtask-result", + Digest: common.DigestSet{ + "sha1": "225734d92807a80158b4b7af605d768c647fdb3d", + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx := logtesting.TestContextWithLogger(t) + got := FromPipelineParamsAndResults(ctx, tc.pipelineRunObjectV1Beta1, &slsaconfig.SlsaConfig{DeepInspectionEnabled: tc.enableDeepInspection}) + if diff := cmp.Diff(tc.want, got, compare.MaterialsCompareOption()); diff != "" { + t.Errorf("FromPipelineParamsAndResults(): -want +got: %s", diff) + } + }) + } +} + +//nolint:all +func createProWithPipelineParamAndTaskResult() *objects.PipelineRunObjectV1Beta1 { + pro := 
objects.NewPipelineRunObjectV1Beta1(&v1beta1.PipelineRun{ + Status: v1beta1.PipelineRunStatus{ + PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ + PipelineSpec: &v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "CHAINS-GIT_COMMIT", + Default: &v1beta1.ParamValue{ + StringVal: "115734d92807a80158b4b7af605d768c647fdb3d", + }, + }, { + Name: "CHAINS-GIT_URL", + Default: &v1beta1.ParamValue{ + StringVal: "github.com/pipelinerun-param", + }, + }}, + }, + }, + }, + }) + + pipelineTaskName := "my-clone-task" + tr := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{objects.PipelineTaskLabel: pipelineTaskName}}, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + CompletionTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, + TaskRunResults: []v1beta1.TaskRunResult{ + { + Name: "ARTIFACT_INPUTS", + Value: *v1beta1.NewObject(map[string]string{ + "uri": "github.com/childtask-result", + "digest": "sha1:225734d92807a80158b4b7af605d768c647fdb3d", + })}, + }, + }, + }, + } + + pro.AppendTaskRun(tr) + pro.Status.PipelineSpec.Tasks = []v1beta1.PipelineTask{{Name: pipelineTaskName}} + return pro +} diff --git a/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun-childrefs.json b/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun-childrefs.json new file mode 100644 index 0000000000..7fc402a02e --- /dev/null +++ b/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun-childrefs.json @@ -0,0 +1,131 @@ +{ + "spec": { + "params": [ + { + "name": "IMAGE", + "value": "test.io/test/image" + } + ], + "pipelineRef": { + "name": "test-pipeline" + }, + "taskRunTemplate": { + "serviceAccountName": "pipeline" + } + }, + "status": { + "startTime": "2021-03-29T09:50:00Z", + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "lastTransitionTime": "2021-03-29T09:50:15Z", + "message": "Tasks Completed: 2 (Failed: 0, Cancelled 0), Skipped: 0", + "reason": 
"Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "results": [ + { + "name": "CHAINS-GIT_COMMIT", + "value": "abcd" + }, + { + "name": "CHAINS-GIT_URL", + "value": "https://git.test.com" + }, + { + "name": "IMAGE_URL", + "value": "test.io/test/image" + }, + { + "name": "IMAGE_DIGEST", + "value": "sha256:827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + } + ], + "pipelineSpec": { + "params": [ + { + "description": "Image path on registry", + "name": "IMAGE", + "type": "string" + } + ], + "results": [ + { + "description": "", + "name": "CHAINS-GIT_COMMIT", + "value": "$(tasks.git-clone.results.commit)" + }, + { + "description": "", + "name": "CHAINS-GIT_URL", + "value": "$(tasks.git-clone.results.url)" + }, + { + "description": "", + "name": "IMAGE_URL", + "value": "$(tasks.build.results.IMAGE_URL)" + }, + { + "description": "", + "name": "IMAGE_DIGEST", + "value": "$(tasks.build.results.IMAGE_DIGEST)" + } + ], + "tasks": [ + { + "name": "git-clone", + "params": [ + { + "name": "url", + "value": "https://git.test.com" + }, + { + "name": "revision", + "value": "" + } + ], + "taskRef": { + "kind": "ClusterTask", + "name": "git-clone" + } + }, + { + "name": "build", + "params": [ + { + "name": "CHAINS-GIT_COMMIT", + "value": "$(tasks.git-clone.results.commit)" + }, + { + "name": "CHAINS-GIT_URL", + "value": "$(tasks.git-clone.results.url)" + } + ], + "runAfter": [ + "git-clone" + ], + "taskRef": { + "kind": "ClusterTask", + "name": "build" + } + } + ] + }, + "childReferences": [ + { + "apiVersion": "tekton.dev/v1", + "kind": "TaskRun", + "name": "git-clone", + "pipelineTaskName": "git-clone" + }, + { + "apiVersion": "tekton.dev/v1", + "kind": "TaskRun", + "name": "taskrun-build", + "pipelineTaskName": "build" + } + ] + } +} diff --git a/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun1.json b/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun1.json new file mode 100644 index 0000000000..fda4c6ead3 --- /dev/null +++ 
b/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun1.json @@ -0,0 +1,308 @@ +{ + "spec": { + "params": [ + { + "name": "IMAGE", + "value": "test.io/test/image" + } + ], + "pipelineRef": { + "name": "test-pipeline" + }, + "taskRunTemplate": { + "serviceAccountName": "pipeline" + } + }, + "status": { + "startTime": "2021-03-29T09:50:00Z", + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "lastTransitionTime": "2021-03-29T09:50:15Z", + "message": "Tasks Completed: 2 (Failed: 0, Cancelled 0), Skipped: 0", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "results": [ + { + "name": "CHAINS-GIT_COMMIT", + "value": "abcd" + }, + { + "name": "CHAINS-GIT_URL", + "value": "https://git.test.com" + }, + { + "name": "IMAGE_URL", + "value": "test.io/test/image" + }, + { + "name": "IMAGE_DIGEST", + "value": "sha256:827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + }, + { + "name": "img-ARTIFACT_INPUTS", + "value": { + "uri": "abc","digest": "sha256:827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + } + }, + { + "name": "img2-ARTIFACT_OUTPUTS", + "value": { + "uri": "def","digest": "sha256:" + } + }, + { + "name": "img_no_uri-ARTIFACT_OUTPUTS", + "value": { + "digest": "sha256:827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + } + } + ], + "pipelineSpec": { + "params": [ + { + "description": "Image path on registry", + "name": "IMAGE", + "type": "string" + } + ], + "results": [ + { + "description": "", + "name": "CHAINS-GIT_COMMIT", + "value": "$(tasks.git-clone.results.commit)" + }, + { + "description": "", + "name": "CHAINS-GIT_URL", + "value": "$(tasks.git-clone.results.url)" + }, + { + "description": "", + "name": "IMAGE_URL", + "value": "$(tasks.build.results.IMAGE_URL)" + }, + { + "description": "", + "name": "IMAGE_DIGEST", + "value": "$(tasks.build.results.IMAGE_DIGEST)" + } + ], + "tasks": [ + { + "name": "git-clone", + "params": [ + { + "name": "url", + "value": 
"https://git.test.com" + }, + { + "name": "revision", + "value": "" + } + ], + "taskRef": { + "kind": "ClusterTask", + "name": "git-clone" + } + }, + { + "name": "build", + "params": [ + { + "name": "CHAINS-GIT_COMMIT", + "value": "$(tasks.git-clone.results.commit)" + }, + { + "name": "CHAINS-GIT_URL", + "value": "$(tasks.git-clone.results.url)" + } + ], + "taskRef": { + "kind": "ClusterTask", + "name": "build" + } + } + ] + }, + "taskRuns": { + "git-clone": { + "pipelineTaskName": "git-clone", + "status": { + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "lastTransitionTime": "2021-03-29T09:50:15Z", + "message": "All Steps have completed executing", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "podName": "git-clone-pod", + "startTime": "2021-03-29T09:50:00Z", + "steps": [ + { + "container": "step-clone", + "imageID": "test.io/test/clone-image", + "name": "clone", + "terminated": { + "exitCode": 0, + "finishedAt": "2021-03-29T09:50:15Z", + "reason": "Completed", + "startedAt": "2022-05-31T19:13:27Z" + } + } + ], + "results": [ + { + "name": "commit", + "value": "abcd" + }, + { + "name": "url", + "value": "https://git.test.com" + } + ], + "taskSpec": { + "params": [ + { + "description": "Repository URL to clone from.", + "name": "url", + "type": "string" + }, + { + "default": "", + "description": "Revision to checkout. 
(branch, tag, sha, ref, etc...)", + "name": "revision", + "type": "string" + } + ], + "results": [ + { + "description": "The precise commit SHA that was fetched by this Task.", + "name": "commit" + }, + { + "description": "The precise URL that was fetched by this Task.", + "name": "url" + } + ], + "steps": [ + { + "env": [ + { + "name": "HOME", + "value": "$(params.userHome)" + }, + { + "name": "PARAM_URL", + "value": "$(params.url)" + } + ], + "image": "$(params.gitInitImage)", + "name": "clone", + "resources": {}, + "script": "git clone" + } + ] + } + } + }, + "taskrun-build": { + "pipelineTaskName": "build", + "status": { + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "lastTransitionTime": "2021-03-29T09:50:15Z", + "message": "All Steps have completed executing", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "podName": "build-pod", + "startTime": "2021-03-29T09:50:00Z", + "steps": [ + { + "container": "step-build", + "imageID": "test.io/test/build-image", + "name": "build", + "terminated": { + "exitCode": 0, + "finishedAt": "2022-05-31T19:17:30Z", + "reason": "Completed", + "startedAt": "2021-03-29T09:50:00Z" + } + } + ], + "results": [ + { + "name": "IMAGE_DIGEST", + "value": "sha256:827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + }, + { + "name": "IMAGE_URL", + "value": "test.io/test/image\n" + } + ], + "taskSpec": { + "params": [ + { + "description": "Git CHAINS URL", + "name": "CHAINS-GIT_URL", + "type": "string" + }, + { + "description": "Git CHAINS Commit", + "name": "CHAINS-GIT_COMMIT", + "type": "string" + } + ], + "results": [ + { + "description": "Digest of the image just built.", + "name": "IMAGE_DIGEST" + }, + { + "description": "URL of the image just built.", + "name": "IMAGE_URL" + } + ], + "steps": [ + { + "command": [ + "buildah", + "build" + ], + "image": "test.io/test/build-image", + "name": "generate" + }, + { + "command": [ + "buildah", + "push" + ], + "image": 
"test.io/test/build-image", + "name": "push" + } + ] + } + } + } + }, + "provenance": { + "refSource": { + "uri": "github.com/test", + "digest": { + "sha1": "28b123" + }, + "entryPoint": "pipeline.yaml" + } + } + } +} diff --git a/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun_structured_results.json b/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun_structured_results.json new file mode 100644 index 0000000000..909f821ec0 --- /dev/null +++ b/pkg/chains/formats/slsa/testdata/pipeline-v1/pipelinerun_structured_results.json @@ -0,0 +1,266 @@ +{ + "spec": { + "params": [ + { + "name": "IMAGE", + "value": "test.io/test/image" + } + ], + "pipelineRef": { + "name": "test-pipeline" + }, + "taskRunTemplate": { + "serviceAccountName": "pipeline" + } + }, + "status": { + "startTime": "2021-03-29T09:50:00Z", + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "lastTransitionTime": "2021-03-29T09:50:15Z", + "message": "Tasks Completed: 2 (Failed: 0, Cancelled 0), Skipped: 0", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "results": [ + { + "name": "image-ARTIFACT_INPUTS", + "value": { + "uri": "abcd", + "digest": "sha256:827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + } + }, + { + "name": "image-ARTIFACT_OUTPUTS", + "value": { + "uri": "hello_world", + "sha256": "827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + } + } + ], + "pipelineSpec": { + "params": [ + { + "description": "Image path on registry", + "name": "IMAGE", + "type": "string" + } + ], + "tasks": [ + { + "name": "git-clone", + "params": [ + { + "name": "url", + "value": "https://git.test.com" + }, + { + "name": "revision", + "value": "" + } + ], + "taskRef": { + "kind": "ClusterTask", + "name": "git-clone" + } + }, + { + "name": "build", + "params": [ + { + "name": "CHAINS-GIT_COMMIT", + "value": "$(tasks.git-clone.results.commit)" + }, + { + "name": "CHAINS-GIT_URL", + "value": "$(tasks.git-clone.results.url)" 
+ } + ], + "taskRef": { + "kind": "ClusterTask", + "name": "build" + } + } + ] + }, + "taskRuns": { + "git-clone": { + "pipelineTaskName": "git-clone", + "status": { + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "lastTransitionTime": "2021-03-29T09:50:15Z", + "message": "All Steps have completed executing", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "podName": "git-clone-pod", + "startTime": "2021-03-29T09:50:00Z", + "steps": [ + { + "container": "step-clone", + "imageID": "test.io/test/clone-image", + "name": "clone", + "terminated": { + "exitCode": 0, + "finishedAt": "2021-03-29T09:50:15Z", + "reason": "Completed", + "startedAt": "2022-05-31T19:13:27Z" + } + } + ], + "results": [ + { + "name": "commit", + "value": "abcd" + }, + { + "name": "url", + "value": "https://git.test.com" + } + ], + "taskSpec": { + "params": [ + { + "description": "Repository URL to clone from.", + "name": "url", + "type": "string" + }, + { + "default": "", + "description": "Revision to checkout. 
(branch, tag, sha, ref, etc...)", + "name": "revision", + "type": "string" + } + ], + "results": [ + { + "description": "The precise commit SHA that was fetched by this Task.", + "name": "commit" + }, + { + "description": "The precise URL that was fetched by this Task.", + "name": "url" + } + ], + "steps": [ + { + "env": [ + { + "name": "HOME", + "value": "$(params.userHome)" + }, + { + "name": "PARAM_URL", + "value": "$(params.url)" + } + ], + "image": "$(params.gitInitImage)", + "name": "clone", + "resources": {}, + "script": "git clone" + } + ] + } + } + }, + "taskrun-build": { + "pipelineTaskName": "build", + "status": { + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "lastTransitionTime": "2021-03-29T09:50:15Z", + "message": "All Steps have completed executing", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "podName": "build-pod", + "startTime": "2021-03-29T09:50:00Z", + "steps": [ + { + "container": "step-build", + "imageID": "test.io/test/build-image", + "name": "build", + "terminated": { + "exitCode": 0, + "finishedAt": "2022-05-31T19:17:30Z", + "reason": "Completed", + "startedAt": "2021-03-29T09:50:00Z" + } + } + ], + "results": [ + { + "name": "IMAGE_DIGEST", + "value": "sha256:827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + }, + { + "name": "IMAGE_URL", + "value": "test.io/test/image\n" + } + ], + "taskSpec": { + "params": [ + { + "description": "Git CHAINS URL", + "name": "CHAINS-GIT_URL", + "type": "string" + }, + { + "description": "Git CHAINS Commit", + "name": "CHAINS-GIT_COMMIT", + "type": "string" + } + ], + "results": [ + { + "description": "Digest of the image just built.", + "name": "IMAGE_DIGEST" + }, + { + "description": "URL of the image just built.", + "name": "IMAGE_URL" + } + ], + "steps": [ + { + "command": [ + "buildah", + "build" + ], + "image": "test.io/test/build-image", + "name": "generate" + }, + { + "command": [ + "buildah", + "push" + ], + "image": 
"test.io/test/build-image", + "name": "push" + } + ] + } + } + } + }, + "provenance": { + "refSource": { + "uri": "github.com/test", + "digest": { + "sha1": "28b123" + }, + "entryPoint": "pipeline.yaml" + } + } + } +} diff --git a/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun-multiple-subjects.json b/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun-multiple-subjects.json new file mode 100644 index 0000000000..38ac438638 --- /dev/null +++ b/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun-multiple-subjects.json @@ -0,0 +1,56 @@ +{ + "spec": { + "params": [], + "taskRef": { + "name": "test-task", + "kind": "Task" + }, + "serviceAccountName": "default" + }, + "status": { + "conditions": [ + { + "type": "Succeeded", + "status": "True", + "lastTransitionTime": "2021-03-29T09:50:15Z", + "reason": "Succeeded", + "message": "All Steps have completed executing" + } + ], + "podName": "test-pod-name", + "steps": [ + { + "name": "step1", + "container": "step-step1", + "imageID": "docker-pullable://gcr.io/test1/test1@sha256:d4b63d3e24d6eef04a6dc0795cf8a73470688803d97c52cffa3c8d4efd3397b6" + } + ], + "results": [ + { + "name": "IMAGES", + "value": "gcr.io/myimage1@sha256:d4b63d3e24d6eef04a6dc0795cf8a73470688803d97c52cffa3c8d4efd3397b6,gcr.io/myimage2@sha256:daa1a56e13c85cf164e7d9e595006649e3a04c47fe4a8261320e18a0bf3b0367" + } + ], + "taskSpec": { + "params": [], + "results": [ + { + "name": "file1_DIGEST", + "description": "Digest of a file to push." + }, + { + "name": "file1", + "description": "some assembled file" + }, + { + "name": "file2_DIGEST", + "description": "Digest of a file to push." 
+ }, + { + "name": "file2", + "description": "some assembled file" + } + ] + } + } +} diff --git a/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun1.json b/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun1.json new file mode 100644 index 0000000000..d42a4638e4 --- /dev/null +++ b/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun1.json @@ -0,0 +1,136 @@ +{ + "metadata": { + "name": "taskrun-build", + "labels": { + "tekton.dev/pipelineTask": "build" + } + }, + "spec": { + "params": [ + { + "name": "IMAGE", + "value": "test.io/test/image" + }, + { + "name": "CHAINS-GIT_COMMIT", + "value": "sha:taskrun" + }, + { + "name": "CHAINS-GIT_URL", + "value": "https://git.test.com" + } + ], + "taskRef": { + "name": "build", + "kind": "Task" + }, + "serviceAccountName": "default" + }, + "status": { + "startTime": "2021-03-29T09:50:00Z", + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "type": "Succeeded", + "status": "True", + "lastTransitionTime": "2021-03-29T09:50:15Z", + "reason": "Succeeded", + "message": "All Steps have completed executing" + } + ], + "podName": "test-pod-name", + "steps": [ + { + "name": "step1", + "container": "step-step1", + "imageID": "docker-pullable://gcr.io/test1/test1@sha256:d4b63d3e24d6eef04a6dc0795cf8a73470688803d97c52cffa3c8d4efd3397b6" + }, + { + "name": "step2", + "container": "step-step2", + "imageID": "docker-pullable://gcr.io/test2/test2@sha256:4d6dd704ef58cb214dd826519929e92a978a57cdee43693006139c0080fd6fac" + }, + { + "name": "step3", + "container": "step-step3", + "imageID": "docker-pullable://gcr.io/test3/test3@sha256:f1a8b8549c179f41e27ff3db0fe1a1793e4b109da46586501a8343637b1d0478" + } + ], + "results": [ + { + "name": "IMAGE_DIGEST", + "value": "sha256:827521c857fdcd4374f4da5442fbae2edb01e7fbae285c3ec15673d4c1daecb7" + }, + { + "name": "IMAGE_URL", + "value": "gcr.io/my/image" + } + ], + "taskSpec": { + "params": [ + { + "name": "IMAGE", + "type": "string" + }, + { + "name": "filename", + "type": "string" 
+ }, + { + "name": "DOCKERFILE", + "type": "string" + }, + { + "name": "CONTEXT", + "type": "string" + }, + { + "name": "EXTRA_ARGS", + "type": "string" + }, + { + "name": "BUILDER_IMAGE", + "type": "string" + }, { + "name": "CHAINS-GIT_COMMIT", + "type": "string", + "default": "sha:task" + }, { + "name": "CHAINS-GIT_URL", + "type": "string", + "default": "https://defaultgit.test.com" + } + ], + "steps": [ + { + "name": "step1" + }, + { + "name": "step2" + }, + { + "name": "step3" + } + ], + "results": [ + { + "name": "IMAGE_DIGEST", + "description": "Digest of the image just built." + }, + { + "name": "filename_DIGEST", + "description": "Digest of the file just built." + } + ] + }, + "provenance": { + "refSource": { + "uri": "github.com/test", + "digest": { + "sha1": "ab123" + }, + "entryPoint": "build.yaml" + } + } + } +} diff --git a/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun2.json b/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun2.json new file mode 100644 index 0000000000..63b3f984b7 --- /dev/null +++ b/pkg/chains/formats/slsa/testdata/pipeline-v1/taskrun2.json @@ -0,0 +1,105 @@ +{ + "metadata": { + "name": "git-clone", + "labels": { + "tekton.dev/pipelineTask": "git-clone" + } + }, + "spec": { + "params": [ + { + "name": "url", + "value": "https://git.test.com" + }, + { + "name": "revision", + "value": "" + } + ], + "taskRef": { + "name": "git-clone", + "kind": "Task" + }, + "serviceAccountName": "default" + }, + "status": { + "startTime": "2021-03-29T09:50:00Z", + "completionTime": "2021-03-29T09:50:15Z", + "conditions": [ + { + "type": "Succeeded", + "status": "True", + "lastTransitionTime": "2021-03-29T09:50:15Z", + "reason": "Succeeded", + "message": "All Steps have completed executing" + } + ], + "podName": "test-pod-name", + "steps": [ + { + "name": "step1", + "container": "step-step1", + "imageID": "docker-pullable://gcr.io/test1/test1@sha256:d4b63d3e24d6eef04a6dc0795cf8a73470688803d97c52cffa3c8d4efd3397b6" + } + ], + "results": [ + { + 
"name": "some-uri_DIGEST", + "value": "sha256:d4b63d3e24d6eef04a6dc0795cf8a73470688803d97c52cffa3c8d4efd3397b6" + }, + { + "name": "some-uri", + "value": "pkg:deb/debian/curl@7.50.3-1" + } + ], + "taskSpec": { + "steps": [ + { + "env": [ + { + "name": "HOME", + "value": "$(params.userHome)" + }, + { + "name": "PARAM_URL", + "value": "$(params.url)" + } + ], + "name": "step1", + "script": "git clone" + } + ], + "params": [ + { + "name": "CHAINS-GIT_COMMIT", + "type": "string", + "default": "sha:taskdefault" + }, + { + "name": "CHAINS-GIT_URL", + "type": "string", + "default": "https://git.test.com" + } + ], + "results": [ + { + "name": "some-uri_DIGEST", + "description": "Digest of a file to push." + }, + { + "name": "some-uri", + "description": "some calculated uri" + } + ] + }, + "provenance": { + "refSource": { + "uri": "github.com/catalog", + "digest": { + "sha1": "x123" + }, + "entryPoint": "git-clone.yaml" + } + } + } +} diff --git a/pkg/chains/formats/slsa/testdata/pipelinerun-childrefs.json b/pkg/chains/formats/slsa/testdata/pipeline-v1beta1/pipelinerun-childrefs.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/pipelinerun-childrefs.json rename to pkg/chains/formats/slsa/testdata/pipeline-v1beta1/pipelinerun-childrefs.json diff --git a/pkg/chains/formats/slsa/testdata/pipelinerun1.json b/pkg/chains/formats/slsa/testdata/pipeline-v1beta1/pipelinerun1.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/pipelinerun1.json rename to pkg/chains/formats/slsa/testdata/pipeline-v1beta1/pipelinerun1.json diff --git a/pkg/chains/formats/slsa/testdata/pipelinerun_structured_results.json b/pkg/chains/formats/slsa/testdata/pipeline-v1beta1/pipelinerun_structured_results.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/pipelinerun_structured_results.json rename to pkg/chains/formats/slsa/testdata/pipeline-v1beta1/pipelinerun_structured_results.json diff --git 
a/pkg/chains/formats/slsa/testdata/taskrun-multiple-subjects.json b/pkg/chains/formats/slsa/testdata/pipeline-v1beta1/taskrun-multiple-subjects.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/taskrun-multiple-subjects.json rename to pkg/chains/formats/slsa/testdata/pipeline-v1beta1/taskrun-multiple-subjects.json diff --git a/pkg/chains/formats/slsa/testdata/taskrun1.json b/pkg/chains/formats/slsa/testdata/pipeline-v1beta1/taskrun1.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/taskrun1.json rename to pkg/chains/formats/slsa/testdata/pipeline-v1beta1/taskrun1.json diff --git a/pkg/chains/formats/slsa/testdata/taskrun2.json b/pkg/chains/formats/slsa/testdata/pipeline-v1beta1/taskrun2.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/taskrun2.json rename to pkg/chains/formats/slsa/testdata/pipeline-v1beta1/taskrun2.json diff --git a/pkg/chains/formats/slsa/testdata/v2alpha2/pipelinerun1.json b/pkg/chains/formats/slsa/testdata/slsa-v2alpha2/pipelinerun1.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/v2alpha2/pipelinerun1.json rename to pkg/chains/formats/slsa/testdata/slsa-v2alpha2/pipelinerun1.json diff --git a/pkg/chains/formats/slsa/testdata/v2alpha2/pipelinerun_structured_results.json b/pkg/chains/formats/slsa/testdata/slsa-v2alpha2/pipelinerun_structured_results.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/v2alpha2/pipelinerun_structured_results.json rename to pkg/chains/formats/slsa/testdata/slsa-v2alpha2/pipelinerun_structured_results.json diff --git a/pkg/chains/formats/slsa/testdata/v2alpha2/taskrun-multiple-subjects.json b/pkg/chains/formats/slsa/testdata/slsa-v2alpha2/taskrun-multiple-subjects.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/v2alpha2/taskrun-multiple-subjects.json rename to pkg/chains/formats/slsa/testdata/slsa-v2alpha2/taskrun-multiple-subjects.json diff --git 
a/pkg/chains/formats/slsa/testdata/v2alpha2/taskrun1.json b/pkg/chains/formats/slsa/testdata/slsa-v2alpha2/taskrun1.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/v2alpha2/taskrun1.json rename to pkg/chains/formats/slsa/testdata/slsa-v2alpha2/taskrun1.json diff --git a/pkg/chains/formats/slsa/testdata/v2alpha2/taskrun2.json b/pkg/chains/formats/slsa/testdata/slsa-v2alpha2/taskrun2.json similarity index 100% rename from pkg/chains/formats/slsa/testdata/v2alpha2/taskrun2.json rename to pkg/chains/formats/slsa/testdata/slsa-v2alpha2/taskrun2.json diff --git a/pkg/chains/formats/slsa/v1/intotoite6.go b/pkg/chains/formats/slsa/v1/intotoite6.go index 4ab3c8d0bf..f4c3bc8d8f 100644 --- a/pkg/chains/formats/slsa/v1/intotoite6.go +++ b/pkg/chains/formats/slsa/v1/intotoite6.go @@ -26,6 +26,8 @@ import ( "github.com/tektoncd/chains/pkg/chains/formats/slsa/v1/taskrun" "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) const ( @@ -57,9 +59,32 @@ func (i *InTotoIte6) Wrap() bool { func (i *InTotoIte6) CreatePayload(ctx context.Context, obj interface{}) (interface{}, error) { switch v := obj.(type) { - case *objects.TaskRunObject: + case *objects.TaskRunObjectV1: + tro := obj.(*objects.TaskRunObjectV1) + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, tro.GetObject().(*v1.TaskRun)); err != nil { + return nil, fmt.Errorf("error converting Tekton TaskRun from version v1 to v1beta1: %s", err) + } + return taskrun.GenerateAttestation(ctx, objects.NewTaskRunObjectV1Beta1(trV1Beta1), i.slsaConfig) + case *objects.PipelineRunObjectV1: + pro := obj.(*objects.PipelineRunObjectV1) + prV1Beta1 := &v1beta1.PipelineRun{} //nolint:staticcheck + if err := prV1Beta1.ConvertFrom(ctx, pro.GetObject().(*v1.PipelineRun)); err != nil { + return nil, fmt.Errorf("error converting 
Tekton PipelineRun from version v1 to v1beta1: %s", err) + } + proV1Beta1 := objects.NewPipelineRunObjectV1Beta1(prV1Beta1) + trs := pro.GetTaskRuns() + for _, tr := range trs { + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, tr); err != nil { + return nil, fmt.Errorf("error converting Tekton TaskRun from version v1 to v1beta1: %s", err) + } + proV1Beta1.AppendTaskRun(trV1Beta1) + } + return pipelinerun.GenerateAttestation(ctx, proV1Beta1, i.slsaConfig) + case *objects.TaskRunObjectV1Beta1: return taskrun.GenerateAttestation(ctx, v, i.slsaConfig) - case *objects.PipelineRunObject: + case *objects.PipelineRunObjectV1Beta1: return pipelinerun.GenerateAttestation(ctx, v, i.slsaConfig) default: return nil, fmt.Errorf("intoto does not support type: %s", v) diff --git a/pkg/chains/formats/slsa/v1/intotoite6_test.go b/pkg/chains/formats/slsa/v1/intotoite6_test.go index a61bf2489b..9e5605a2ee 100644 --- a/pkg/chains/formats/slsa/v1/intotoite6_test.go +++ b/pkg/chains/formats/slsa/v1/intotoite6_test.go @@ -44,7 +44,7 @@ var e1BuildFinished = time.Unix(1617011415, 0) func TestTaskRunCreatePayload1(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../testdata/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun1.json") if err != nil { t.Fatal(err) } @@ -135,7 +135,7 @@ func TestTaskRunCreatePayload1(t *testing.T) { } i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) @@ -148,7 +148,7 @@ func TestTaskRunCreatePayload1(t *testing.T) { func TestPipelineRunCreatePayload(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - pr, err := objectloader.PipelineRunFromFile("../testdata/pipelinerun1.json") + pr, err := 
objectloader.PipelineRunV1Beta1FromFile("../testdata/pipeline-v1beta1/pipelinerun1.json") if err != nil { t.Fatal(err) } @@ -347,15 +347,15 @@ func TestPipelineRunCreatePayload(t *testing.T) { }, } - tr1, err := objectloader.TaskRunFromFile("../testdata/taskrun1.json") + tr1, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun1.json") if err != nil { t.Errorf("error reading taskrun1: %s", err.Error()) } - tr2, err := objectloader.TaskRunFromFile("../testdata/taskrun2.json") + tr2, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun2.json") if err != nil { t.Errorf("error reading taskrun: %s", err.Error()) } - pro := objects.NewPipelineRunObject(pr) + pro := objects.NewPipelineRunObjectV1Beta1(pr) pro.AppendTaskRun(tr1) pro.AppendTaskRun(tr2) @@ -372,7 +372,7 @@ func TestPipelineRunCreatePayload(t *testing.T) { } func TestPipelineRunCreatePayloadChildRefs(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - pr, err := objectloader.PipelineRunFromFile("../testdata/pipelinerun-childrefs.json") + pr, err := objectloader.PipelineRunV1Beta1FromFile("../testdata/pipeline-v1beta1/pipelinerun-childrefs.json") if err != nil { t.Fatal(err) } @@ -565,15 +565,15 @@ func TestPipelineRunCreatePayloadChildRefs(t *testing.T) { }, } - tr1, err := objectloader.TaskRunFromFile("../testdata/taskrun1.json") + tr1, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun1.json") if err != nil { t.Errorf("error reading taskrun1: %s", err.Error()) } - tr2, err := objectloader.TaskRunFromFile("../testdata/taskrun2.json") + tr2, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun2.json") if err != nil { t.Errorf("error reading taskrun: %s", err.Error()) } - pro := objects.NewPipelineRunObject(pr) + pro := objects.NewPipelineRunObjectV1Beta1(pr) pro.AppendTaskRun(tr1) pro.AppendTaskRun(tr2) @@ -590,7 +590,7 @@ func TestPipelineRunCreatePayloadChildRefs(t *testing.T) { func 
TestTaskRunCreatePayload2(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../testdata/taskrun2.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun2.json") if err != nil { t.Fatal(err) } @@ -653,7 +653,7 @@ func TestTaskRunCreatePayload2(t *testing.T) { }, } i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) @@ -666,7 +666,7 @@ func TestTaskRunCreatePayload2(t *testing.T) { func TestMultipleSubjects(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../testdata/taskrun-multiple-subjects.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun-multiple-subjects.json") if err != nil { t.Fatal(err) } @@ -724,7 +724,7 @@ func TestMultipleSubjects(t *testing.T) { } i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) } diff --git a/pkg/chains/formats/slsa/v1/pipelinerun/pipelinerun.go b/pkg/chains/formats/slsa/v1/pipelinerun/pipelinerun.go index e652111e5f..6c552cc746 100644 --- a/pkg/chains/formats/slsa/v1/pipelinerun/pipelinerun.go +++ b/pkg/chains/formats/slsa/v1/pipelinerun/pipelinerun.go @@ -22,7 +22,7 @@ import ( slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" "github.com/tektoncd/chains/pkg/chains/formats/slsa/attest" "github.com/tektoncd/chains/pkg/chains/formats/slsa/extract" - "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/material" + materialv1beta1 "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/material/v1beta1" "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" 
"github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" @@ -47,10 +47,10 @@ type TaskAttestation struct { Results []v1beta1.TaskRunResult `json:"results,omitempty"` } -func GenerateAttestation(ctx context.Context, pro *objects.PipelineRunObject, slsaConfig *slsaconfig.SlsaConfig) (interface{}, error) { +func GenerateAttestation(ctx context.Context, pro *objects.PipelineRunObjectV1Beta1, slsaConfig *slsaconfig.SlsaConfig) (interface{}, error) { subjects := extract.SubjectDigests(ctx, pro, slsaConfig) - mat, err := material.PipelineMaterials(ctx, pro, slsaConfig) + mat, err := materialv1beta1.PipelineMaterials(ctx, pro, slsaConfig) if err != nil { return nil, err } @@ -74,7 +74,7 @@ func GenerateAttestation(ctx context.Context, pro *objects.PipelineRunObject, sl return att, nil } -func invocation(pro *objects.PipelineRunObject) slsa.ProvenanceInvocation { +func invocation(pro *objects.PipelineRunObjectV1Beta1) slsa.ProvenanceInvocation { var paramSpecs []v1beta1.ParamSpec if ps := pro.Status.PipelineSpec; ps != nil { paramSpecs = ps.Params @@ -82,7 +82,7 @@ func invocation(pro *objects.PipelineRunObject) slsa.ProvenanceInvocation { return attest.Invocation(pro, pro.Spec.Params, paramSpecs) } -func buildConfig(ctx context.Context, pro *objects.PipelineRunObject) BuildConfig { +func buildConfig(ctx context.Context, pro *objects.PipelineRunObjectV1Beta1) BuildConfig { logger := logging.FromContext(ctx) tasks := []TaskAttestation{} @@ -162,7 +162,7 @@ func buildConfig(ctx context.Context, pro *objects.PipelineRunObject) BuildConfi return BuildConfig{Tasks: tasks} } -func metadata(pro *objects.PipelineRunObject) *slsa.ProvenanceMetadata { +func metadata(pro *objects.PipelineRunObjectV1Beta1) *slsa.ProvenanceMetadata { m := &slsa.ProvenanceMetadata{} if pro.Status.StartTime != nil { utc := pro.Status.StartTime.Time.UTC() diff --git a/pkg/chains/formats/slsa/v1/pipelinerun/provenance_test.go 
b/pkg/chains/formats/slsa/v1/pipelinerun/provenance_test.go index 35137767c6..53489bd430 100644 --- a/pkg/chains/formats/slsa/v1/pipelinerun/provenance_test.go +++ b/pkg/chains/formats/slsa/v1/pipelinerun/provenance_test.go @@ -36,31 +36,31 @@ import ( ) // Global pro is only read from, never modified -var pro *objects.PipelineRunObject -var proStructuredResults *objects.PipelineRunObject +var pro *objects.PipelineRunObjectV1Beta1 +var proStructuredResults *objects.PipelineRunObjectV1Beta1 var e1BuildStart = time.Unix(1617011400, 0) var e1BuildFinished = time.Unix(1617011415, 0) func init() { - pro = createPro("../../testdata/pipelinerun1.json") - proStructuredResults = createPro("../../testdata/pipelinerun_structured_results.json") + pro = createPro("../../testdata/pipeline-v1beta1/pipelinerun1.json") + proStructuredResults = createPro("../../testdata/pipeline-v1beta1/pipelinerun_structured_results.json") } -func createPro(path string) *objects.PipelineRunObject { +func createPro(path string) *objects.PipelineRunObjectV1Beta1 { var err error - pr, err := objectloader.PipelineRunFromFile(path) + pr, err := objectloader.PipelineRunV1Beta1FromFile(path) if err != nil { panic(err) } - tr1, err := objectloader.TaskRunFromFile("../../testdata/taskrun1.json") + tr1, err := objectloader.TaskRunV1Beta1FromFile("../../testdata/pipeline-v1beta1/taskrun1.json") if err != nil { panic(err) } - tr2, err := objectloader.TaskRunFromFile("../../testdata/taskrun2.json") + tr2, err := objectloader.TaskRunV1Beta1FromFile("../../testdata/pipeline-v1beta1/taskrun2.json") if err != nil { panic(err) } - p := objects.NewPipelineRunObject(pr) + p := objects.NewPipelineRunObjectV1Beta1(pr) p.AppendTaskRun(tr1) p.AppendTaskRun(tr2) return p @@ -420,7 +420,7 @@ func TestBuildConfigTaskOrder(t *testing.T) { WhenExpressions: tt.whenExpressions, RunAfter: tt.runAfter, } - pro := createPro("../../testdata/pipelinerun1.json") + pro := createPro("../../testdata/pipeline-v1beta1/pipelinerun1.json") 
pro.Status.PipelineSpec.Tasks[BUILD_TASK] = pt ctx := logtesting.TestContextWithLogger(t) got := buildConfig(ctx, pro) @@ -461,7 +461,7 @@ func TestMetadataInTimeZone(t *testing.T) { Reproducible: false, } - zoned := objects.NewPipelineRunObject(pro.DeepCopy()) + zoned := objects.NewPipelineRunObjectV1Beta1(pro.DeepCopy()) tz := time.FixedZone("Test Time", int((12 * time.Hour).Seconds())) zoned.Status.StartTime.Time = zoned.Status.StartTime.Time.In(tz) zoned.Status.CompletionTime.Time = zoned.Status.CompletionTime.Time.In(tz) diff --git a/pkg/chains/formats/slsa/v1/taskrun/buildconfig.go b/pkg/chains/formats/slsa/v1/taskrun/buildconfig.go index 39f513d4c9..b41b1007ef 100644 --- a/pkg/chains/formats/slsa/v1/taskrun/buildconfig.go +++ b/pkg/chains/formats/slsa/v1/taskrun/buildconfig.go @@ -36,7 +36,7 @@ type Step struct { Annotations map[string]string `json:"annotations"` } -func buildConfig(tro *objects.TaskRunObject) BuildConfig { +func buildConfig(tro *objects.TaskRunObjectV1Beta1) BuildConfig { attestations := []attest.StepAttestation{} for _, stepState := range tro.Status.Steps { step := stepFromTaskRun(stepState.Name, tro) @@ -45,7 +45,7 @@ func buildConfig(tro *objects.TaskRunObject) BuildConfig { return BuildConfig{Steps: attestations} } -func stepFromTaskRun(name string, tro *objects.TaskRunObject) *v1beta1.Step { +func stepFromTaskRun(name string, tro *objects.TaskRunObjectV1Beta1) *v1beta1.Step { if tro.Status.TaskSpec != nil { for _, s := range tro.Status.TaskSpec.Steps { if s.Name == name { diff --git a/pkg/chains/formats/slsa/v1/taskrun/buildconfig_test.go b/pkg/chains/formats/slsa/v1/taskrun/buildconfig_test.go index 6ae7d9451b..8cb616995f 100644 --- a/pkg/chains/formats/slsa/v1/taskrun/buildconfig_test.go +++ b/pkg/chains/formats/slsa/v1/taskrun/buildconfig_test.go @@ -59,7 +59,7 @@ status: terminated: containerID: containerd://e2fadd134495619cccd1c48d8a9df2aed2afd64e6c62ea55135f90796102231e` - var taskRun *v1beta1.TaskRun + var taskRun 
*v1beta1.TaskRun //nolint:staticcheck if err := yaml.Unmarshal([]byte(taskrun), &taskRun); err != nil { t.Fatal(err) } @@ -84,7 +84,7 @@ status: }, } - got := buildConfig(objects.NewTaskRunObject(taskRun)) + got := buildConfig(objects.NewTaskRunObjectV1Beta1(taskRun)) if !reflect.DeepEqual(expected, got) { if d := cmp.Diff(expected, got); d != "" { t.Log(d) diff --git a/pkg/chains/formats/slsa/v1/taskrun/provenance_test.go b/pkg/chains/formats/slsa/v1/taskrun/provenance_test.go index 9a5ebe0d5a..f36e5ae360 100644 --- a/pkg/chains/formats/slsa/v1/taskrun/provenance_test.go +++ b/pkg/chains/formats/slsa/v1/taskrun/provenance_test.go @@ -34,7 +34,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" logtesting "knative.dev/pkg/logging/testing" "sigs.k8s.io/yaml" ) @@ -48,8 +48,8 @@ const ( ) func TestMetadata(t *testing.T) { - tr := &v1beta1.TaskRun{ - ObjectMeta: v1.ObjectMeta{ + tr := &v1beta1.TaskRun{ //nolint:staticcheck + ObjectMeta: metav1.ObjectMeta{ Name: "my-taskrun", Namespace: "my-namespace", Annotations: map[string]string{ @@ -58,8 +58,8 @@ func TestMetadata(t *testing.T) { }, Status: v1beta1.TaskRunStatus{ TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - StartTime: &v1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 12, time.UTC)}, - CompletionTime: &v1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, + StartTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 12, time.UTC)}, + CompletionTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, }, }, } @@ -69,7 +69,7 @@ func TestMetadata(t *testing.T) { BuildStartedOn: &start, BuildFinishedOn: &end, } - got := Metadata(objects.NewTaskRunObject(tr)) + got := Metadata(objects.NewTaskRunObjectV1Beta1(tr)) if 
!reflect.DeepEqual(expected, got) { t.Fatalf("expected %v got %v", expected, got) } @@ -77,8 +77,8 @@ func TestMetadata(t *testing.T) { func TestMetadataInTimeZone(t *testing.T) { tz := time.FixedZone("Test Time", int((12 * time.Hour).Seconds())) - tr := &v1beta1.TaskRun{ - ObjectMeta: v1.ObjectMeta{ + tr := &v1beta1.TaskRun{ //nolint:staticcheck + ObjectMeta: metav1.ObjectMeta{ Name: "my-taskrun", Namespace: "my-namespace", Annotations: map[string]string{ @@ -87,8 +87,8 @@ func TestMetadataInTimeZone(t *testing.T) { }, Status: v1beta1.TaskRunStatus{ TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - StartTime: &v1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 12, tz)}, - CompletionTime: &v1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, tz)}, + StartTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 12, tz)}, + CompletionTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, tz)}, }, }, } @@ -98,7 +98,7 @@ func TestMetadataInTimeZone(t *testing.T) { BuildStartedOn: &start, BuildFinishedOn: &end, } - got := Metadata(objects.NewTaskRunObject(tr)) + got := Metadata(objects.NewTaskRunObjectV1Beta1(tr)) if !reflect.DeepEqual(expected, got) { t.Fatalf("expected %v got %v", expected, got) } @@ -161,7 +161,7 @@ status: default: [] ` - var taskRun *v1beta1.TaskRun + var taskRun *v1beta1.TaskRun //nolint:staticcheck if err := yaml.Unmarshal([]byte(taskrun), &taskRun); err != nil { t.Fatal(err) } @@ -189,7 +189,7 @@ status: }, } - got := invocation(objects.NewTaskRunObject(taskRun)) + got := invocation(objects.NewTaskRunObjectV1Beta1(taskRun)) if !reflect.DeepEqual(expected, got) { if d := cmp.Diff(expected, got); d != "" { t.Log(d) @@ -199,18 +199,18 @@ status: } func TestGetSubjectDigests(t *testing.T) { - tr := &v1beta1.TaskRun{ + tr := &v1beta1.TaskRun{ //nolint:staticcheck Spec: v1beta1.TaskRunSpec{ - Resources: &v1beta1.TaskRunResources{ - Outputs: []v1beta1.TaskResourceBinding{ + Resources: 
&v1beta1.TaskRunResources{ //nolint:staticcheck + Outputs: []v1beta1.TaskResourceBinding{ //nolint:staticcheck { - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ //nolint:staticcheck Name: "nil-check", }, }, { - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ //nolint:staticcheck Name: "built-image", - ResourceSpec: &v1alpha1.PipelineResourceSpec{ + ResourceSpec: &v1alpha1.PipelineResourceSpec{ //nolint:staticcheck Type: backport.PipelineResourceTypeImage, }, }, @@ -333,7 +333,7 @@ func TestGetSubjectDigests(t *testing.T) { }, } ctx := logtesting.TestContextWithLogger(t) - tro := objects.NewTaskRunObject(tr) + tro := objects.NewTaskRunObjectV1Beta1(tr) got := extract.SubjectDigests(ctx, tro, nil) if d := cmp.Diff(want, got, compare.SubjectCompareOption()); d != "" { diff --git a/pkg/chains/formats/slsa/v1/taskrun/taskrun.go b/pkg/chains/formats/slsa/v1/taskrun/taskrun.go index 36f185a3ea..89b523e171 100644 --- a/pkg/chains/formats/slsa/v1/taskrun/taskrun.go +++ b/pkg/chains/formats/slsa/v1/taskrun/taskrun.go @@ -21,16 +21,16 @@ import ( slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" "github.com/tektoncd/chains/pkg/chains/formats/slsa/attest" "github.com/tektoncd/chains/pkg/chains/formats/slsa/extract" - "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/material" + materialv1beta1 "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/material/v1beta1" "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) -func GenerateAttestation(ctx context.Context, tro *objects.TaskRunObject, slsaConfig *slsaconfig.SlsaConfig) (interface{}, error) { +func GenerateAttestation(ctx context.Context, tro *objects.TaskRunObjectV1Beta1, slsaConfig *slsaconfig.SlsaConfig) (interface{}, 
error) { subjects := extract.SubjectDigests(ctx, tro, slsaConfig) - mat, err := material.TaskMaterials(ctx, tro) + mat, err := materialv1beta1.TaskMaterials(ctx, tro) if err != nil { return nil, err } @@ -57,7 +57,7 @@ func GenerateAttestation(ctx context.Context, tro *objects.TaskRunObject, slsaCo // invocation describes the event that kicked off the build // we currently don't set ConfigSource because we don't know // which material the Task definition came from -func invocation(tro *objects.TaskRunObject) slsa.ProvenanceInvocation { +func invocation(tro *objects.TaskRunObjectV1Beta1) slsa.ProvenanceInvocation { var paramSpecs []v1beta1.ParamSpec if ts := tro.Status.TaskSpec; ts != nil { paramSpecs = ts.Params @@ -67,7 +67,7 @@ func invocation(tro *objects.TaskRunObject) slsa.ProvenanceInvocation { // Metadata adds taskrun's start time, completion time and reproducibility labels // to the metadata section of the generated provenance. -func Metadata(tro *objects.TaskRunObject) *slsa.ProvenanceMetadata { +func Metadata(tro *objects.TaskRunObjectV1Beta1) *slsa.ProvenanceMetadata { m := &slsa.ProvenanceMetadata{} if tro.Status.StartTime != nil { utc := tro.Status.StartTime.Time.UTC() diff --git a/pkg/chains/formats/slsa/v2alpha1/README.md b/pkg/chains/formats/slsa/v2alpha1/README.md index 6ac8ec267d..fe0060aa26 100644 --- a/pkg/chains/formats/slsa/v2alpha1/README.md +++ b/pkg/chains/formats/slsa/v2alpha1/README.md @@ -2,7 +2,7 @@ When running the following taskrun with bundle resolver referencing the [remote task](https://github.com/tektoncd/catalog/tree/main/task/git-clone/0.9): ```yaml -apiVersion: tekton.dev/v1beta1 +apiVersion: tekton.dev/v1 kind: TaskRun metadata: generateName: bundles-resolver- @@ -44,7 +44,7 @@ The following output was generated. 
Notice the following below: "builder": { "id": "https://tekton.dev/chains/v2" }, - "buildType": "https://chains.tekton.dev/format/slsa/v2alpha1/type/tekton.dev/v1beta1/TaskRun", + "buildType": "https://chains.tekton.dev/format/slsa/v2alpha1/type/tekton.dev/v1/TaskRun", "invocation": { "configSource": { "uri": "gcr.io/tekton-releases/catalog/upstream/git-clone", @@ -102,7 +102,7 @@ The following output was generated. Notice the following below: "EnableProvenanceInStatus": true, "ResultExtractionMethod": "termination-message", "MaxResultSize": 4096, - "CustomTaskVersion": "v1beta1" + "CustomTaskVersion": "v1" } } }, diff --git a/pkg/chains/formats/slsa/v2alpha1/slsav2.go b/pkg/chains/formats/slsa/v2alpha1/slsav2.go index bb7e1b68ee..30bd808971 100644 --- a/pkg/chains/formats/slsa/v2alpha1/slsav2.go +++ b/pkg/chains/formats/slsa/v2alpha1/slsav2.go @@ -24,6 +24,8 @@ import ( "github.com/tektoncd/chains/pkg/chains/formats/slsa/v2alpha1/taskrun" "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) const ( @@ -50,8 +52,16 @@ func (s *Slsa) Wrap() bool { func (s *Slsa) CreatePayload(ctx context.Context, obj interface{}) (interface{}, error) { switch v := obj.(type) { - case *objects.TaskRunObject: + case *objects.TaskRunObjectV1: + tro := obj.(*objects.TaskRunObjectV1) + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, tro.GetObject().(*v1.TaskRun)); err != nil { + return nil, fmt.Errorf("error converting Tekton TaskRun from version v1 to v1beta1: %s", err) + } + return taskrun.GenerateAttestation(ctx, s.builderID, s.Type(), objects.NewTaskRunObjectV1Beta1(trV1Beta1)) + case *objects.TaskRunObjectV1Beta1: return taskrun.GenerateAttestation(ctx, s.builderID, s.Type(), v) + default: return nil, fmt.Errorf("intoto does not support type: %s", v) } diff --git 
a/pkg/chains/formats/slsa/v2alpha1/slsav2_test.go b/pkg/chains/formats/slsa/v2alpha1/slsav2_test.go index c06e126ca7..7055377d00 100644 --- a/pkg/chains/formats/slsa/v2alpha1/slsav2_test.go +++ b/pkg/chains/formats/slsa/v2alpha1/slsav2_test.go @@ -47,7 +47,7 @@ var ( func TestTaskRunCreatePayload1(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../testdata/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun1.json") if err != nil { t.Fatal(err) } @@ -117,7 +117,7 @@ func TestTaskRunCreatePayload1(t *testing.T) { }, }, "PodTemplate": (*pod.Template)(nil), - "Resources": (*v1beta1.TaskRunResources)(nil), + "Resources": (*v1beta1.TaskRunResources)(nil), //nolint:staticcheck "Retries": 0, "ServiceAccountName": "default", "SidecarOverrides": []v1beta1.TaskRunSidecarOverride(nil), @@ -162,7 +162,7 @@ func TestTaskRunCreatePayload1(t *testing.T) { } i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) @@ -174,7 +174,7 @@ func TestTaskRunCreatePayload1(t *testing.T) { func TestTaskRunCreatePayload2(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../testdata/taskrun2.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun2.json") if err != nil { t.Fatal(err) } @@ -225,7 +225,7 @@ func TestTaskRunCreatePayload2(t *testing.T) { {Name: "revision", Value: v1beta1.ParamValue{Type: "string"}}, }, "PodTemplate": (*pod.Template)(nil), - "Resources": (*v1beta1.TaskRunResources)(nil), + "Resources": (*v1beta1.TaskRunResources)(nil), //nolint:staticcheck "Retries": 0, "ServiceAccountName": "default", "SidecarOverrides": []v1beta1.TaskRunSidecarOverride(nil), @@ -263,7 +263,7 @@ func TestTaskRunCreatePayload2(t *testing.T) { 
}, } i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) @@ -276,7 +276,7 @@ func TestTaskRunCreatePayload2(t *testing.T) { func TestMultipleSubjects(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../testdata/taskrun-multiple-subjects.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/pipeline-v1beta1/taskrun-multiple-subjects.json") if err != nil { t.Fatal(err) } @@ -326,7 +326,7 @@ func TestMultipleSubjects(t *testing.T) { "Debug": (*v1beta1.TaskRunDebug)(nil), "Params": v1beta1.Params{}, "PodTemplate": (*pod.Template)(nil), - "Resources": (*v1beta1.TaskRunResources)(nil), + "Resources": (*v1beta1.TaskRunResources)(nil), //nolint:staticcheck "Retries": 0, "ServiceAccountName": "default", "SidecarOverrides": []v1beta1.TaskRunSidecarOverride(nil), @@ -361,7 +361,7 @@ func TestMultipleSubjects(t *testing.T) { } i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) } diff --git a/pkg/chains/formats/slsa/v2alpha1/taskrun/taskrun.go b/pkg/chains/formats/slsa/v2alpha1/taskrun/taskrun.go index f9c01dbc1a..fe63e4f019 100644 --- a/pkg/chains/formats/slsa/v2alpha1/taskrun/taskrun.go +++ b/pkg/chains/formats/slsa/v2alpha1/taskrun/taskrun.go @@ -22,7 +22,7 @@ import ( "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" "github.com/tektoncd/chains/pkg/chains/formats/slsa/extract" - "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/material" + materialv1beta1 "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/material/v1beta1" slsav1 
"github.com/tektoncd/chains/pkg/chains/formats/slsa/v1/taskrun" "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" @@ -37,9 +37,9 @@ type BuildConfig struct { TaskRunResults []v1beta1.TaskRunResult `json:"taskRunResults"` } -func GenerateAttestation(ctx context.Context, builderID string, payloadType config.PayloadType, tro *objects.TaskRunObject) (interface{}, error) { +func GenerateAttestation(ctx context.Context, builderID string, payloadType config.PayloadType, tro *objects.TaskRunObjectV1Beta1) (interface{}, error) { subjects := extract.SubjectDigests(ctx, tro, nil) - mat, err := material.TaskMaterials(ctx, tro) + mat, err := materialv1beta1.TaskMaterials(ctx, tro) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func GenerateAttestation(ctx context.Context, builderID string, payloadType conf return att, nil } -func metadata(tro *objects.TaskRunObject) *slsa.ProvenanceMetadata { +func metadata(tro *objects.TaskRunObjectV1Beta1) *slsa.ProvenanceMetadata { m := slsav1.Metadata(tro) m.Completeness = slsa.ProvenanceComplete{ Parameters: true, @@ -74,7 +74,7 @@ func metadata(tro *objects.TaskRunObject) *slsa.ProvenanceMetadata { // invocation describes the event that kicked off the build // we currently don't set ConfigSource because we don't know // which material the Task definition came from -func invocation(tro *objects.TaskRunObject) slsa.ProvenanceInvocation { +func invocation(tro *objects.TaskRunObjectV1Beta1) slsa.ProvenanceInvocation { i := slsa.ProvenanceInvocation{} if p := tro.Status.Provenance; p != nil && p.RefSource != nil { i.ConfigSource = slsa.ConfigSource{ @@ -94,7 +94,7 @@ func invocation(tro *objects.TaskRunObject) slsa.ProvenanceInvocation { // invocationEnv adds the tekton feature flags that were enabled // for the taskrun. In the future, we can populate versioning information // here as well. 
-func invocationEnv(tro *objects.TaskRunObject) map[string]any { +func invocationEnv(tro *objects.TaskRunObjectV1Beta1) map[string]any { var iEnv map[string]any = make(map[string]any) if tro.Status.Provenance != nil && tro.Status.Provenance.FeatureFlags != nil { iEnv["tekton-pipelines-feature-flags"] = tro.Status.Provenance.FeatureFlags @@ -104,7 +104,7 @@ func invocationEnv(tro *objects.TaskRunObject) map[string]any { // invocationParams adds all fields from the task run object except // TaskRef or TaskSpec since they are in the ConfigSource or buildConfig. -func invocationParams(tro *objects.TaskRunObject) map[string]any { +func invocationParams(tro *objects.TaskRunObjectV1Beta1) map[string]any { var iParams map[string]any = make(map[string]any) skipFields := sets.NewString("TaskRef", "TaskSpec") v := reflect.ValueOf(tro.Spec) diff --git a/pkg/chains/formats/slsa/v2alpha1/taskrun/taskrun_test.go b/pkg/chains/formats/slsa/v2alpha1/taskrun/taskrun_test.go index 3327ce0222..2aff0c355d 100644 --- a/pkg/chains/formats/slsa/v2alpha1/taskrun/taskrun_test.go +++ b/pkg/chains/formats/slsa/v2alpha1/taskrun/taskrun_test.go @@ -53,7 +53,7 @@ const ( ) func TestMetadata(t *testing.T) { - tr := &v1beta1.TaskRun{ + tr := &v1beta1.TaskRun{ //nolint:staticcheck ObjectMeta: v1.ObjectMeta{ Name: "my-taskrun", Namespace: "my-namespace", @@ -74,7 +74,7 @@ func TestMetadata(t *testing.T) { BuildStartedOn: &start, BuildFinishedOn: &end, } - got := slsav1.Metadata(objects.NewTaskRunObject(tr)) + got := slsav1.Metadata(objects.NewTaskRunObjectV1Beta1(tr)) if !reflect.DeepEqual(expected, got) { t.Fatalf("expected %v got %v", expected, got) } @@ -82,7 +82,7 @@ func TestMetadata(t *testing.T) { func TestMetadataInTimeZone(t *testing.T) { tz := time.FixedZone("Test Time", int((12 * time.Hour).Seconds())) - tr := &v1beta1.TaskRun{ + tr := &v1beta1.TaskRun{ //nolint:staticcheck ObjectMeta: v1.ObjectMeta{ Name: "my-taskrun", Namespace: "my-namespace", @@ -103,7 +103,7 @@ func 
TestMetadataInTimeZone(t *testing.T) { BuildStartedOn: &start, BuildFinishedOn: &end, } - got := slsav1.Metadata(objects.NewTaskRunObject(tr)) + got := slsav1.Metadata(objects.NewTaskRunObjectV1Beta1(tr)) if !reflect.DeepEqual(expected, got) { t.Fatalf("expected %v got %v", expected, got) } @@ -167,7 +167,7 @@ status: RunningInEnvWithInjectedSidecars: true ` - var taskRun *v1beta1.TaskRun + var taskRun *v1beta1.TaskRun //nolint:staticcheck if err := yaml.Unmarshal([]byte(taskrun), &taskRun); err != nil { t.Fatal(err) } @@ -192,7 +192,7 @@ status: "ComputeResources": (*corev1.ResourceRequirements)(nil), "Debug": (*v1beta1.TaskRunDebug)(nil), "PodTemplate": (*pod.Template)(nil), - "Resources": (*v1beta1.TaskRunResources)(nil), + "Resources": (*v1beta1.TaskRunResources)(nil), //nolint:staticcheck "Retries": 0, "ServiceAccountName": "", "SidecarOverrides": []v1beta1.TaskRunSidecarOverride(nil), @@ -214,7 +214,7 @@ status: }, }, } - got := invocation(objects.NewTaskRunObject(taskRun)) + got := invocation(objects.NewTaskRunObjectV1Beta1(taskRun)) if !reflect.DeepEqual(expected, got) { if d := cmp.Diff(expected, got); d != "" { t.Log(d) @@ -224,18 +224,18 @@ status: } func TestGetSubjectDigests(t *testing.T) { - tr := &v1beta1.TaskRun{ + tr := &v1beta1.TaskRun{ //nolint:staticcheck Spec: v1beta1.TaskRunSpec{ - Resources: &v1beta1.TaskRunResources{ - Outputs: []v1beta1.TaskResourceBinding{ + Resources: &v1beta1.TaskRunResources{ //nolint:staticcheck + Outputs: []v1beta1.TaskResourceBinding{ //nolint:staticcheck { - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ //nolint:staticcheck Name: "nil-check", }, }, { - PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ //nolint:staticcheck Name: "built-image", - ResourceSpec: &v1alpha1.PipelineResourceSpec{ + ResourceSpec: &v1alpha1.PipelineResourceSpec{ //nolint:staticcheck Type: 
backport.PipelineResourceTypeImage, }, }, @@ -357,7 +357,7 @@ func TestGetSubjectDigests(t *testing.T) { }, }, } - tro := objects.NewTaskRunObject(tr) + tro := objects.NewTaskRunObjectV1Beta1(tr) ctx := logtesting.TestContextWithLogger(t) got := extract.SubjectDigests(ctx, tro, nil) diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/external_parameters/external_parameters.go b/pkg/chains/formats/slsa/v2alpha2/internal/external_parameters/external_parameters.go index 27185766a4..3427c7a2b6 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/external_parameters/external_parameters.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/external_parameters/external_parameters.go @@ -20,10 +20,10 @@ import ( "fmt" "github.com/tektoncd/chains/pkg/chains/objects" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" ) -func buildConfigSource(provenance *v1beta1.Provenance) map[string]string { +func buildConfigSource(provenance *v1.Provenance) map[string]string { ref := "" for alg, hex := range provenance.RefSource.Digest { ref = fmt.Sprintf("%s:%s", alg, hex) @@ -38,7 +38,7 @@ func buildConfigSource(provenance *v1beta1.Provenance) map[string]string { } // PipelineRun adds the pipeline run spec and provenance if available -func PipelineRun(pro *objects.PipelineRunObject) map[string]any { +func PipelineRun(pro *objects.PipelineRunObjectV1Beta1) map[string]any { externalParams := make(map[string]any) if provenance := pro.GetRemoteProvenance(); provenance != nil { @@ -49,7 +49,7 @@ func PipelineRun(pro *objects.PipelineRunObject) map[string]any { } // TaskRun adds the task run spec and provenance if available -func TaskRun(tro *objects.TaskRunObject) map[string]any { +func TaskRun(tro *objects.TaskRunObjectV1Beta1) map[string]any { externalParams := make(map[string]any) if provenance := tro.GetRemoteProvenance(); provenance != nil { diff --git 
a/pkg/chains/formats/slsa/v2alpha2/internal/external_parameters/external_parameters_test.go b/pkg/chains/formats/slsa/v2alpha2/internal/external_parameters/external_parameters_test.go index a389224676..2e13b47544 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/external_parameters/external_parameters_test.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/external_parameters/external_parameters_test.go @@ -23,13 +23,14 @@ import ( "github.com/google/go-cmp/cmp" "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/internal/objectloader" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) func TestBuildConfigSource(t *testing.T) { digest := map[string]string{"alg1": "hex1", "alg2": "hex2"} - provenance := &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + provenance := &v1.Provenance{ + RefSource: &v1.RefSource{ Digest: digest, URI: "https://tekton.com", EntryPoint: "/path/to/entry", @@ -65,27 +66,27 @@ func TestBuildConfigSource(t *testing.T) { } } -func createPro(path string) *objects.PipelineRunObject { - pr, err := objectloader.PipelineRunFromFile(path) +func createPro(path string) *objects.PipelineRunObjectV1Beta1 { + pr, err := objectloader.PipelineRunV1Beta1FromFile(path) if err != nil { panic(err) } - tr1, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr1, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { panic(err) } - tr2, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun2.json") + tr2, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun2.json") if err != nil { panic(err) } - p := objects.NewPipelineRunObject(pr) + p := objects.NewPipelineRunObjectV1Beta1(pr) p.AppendTaskRun(tr1) p.AppendTaskRun(tr2) return p } func TestPipelineRun(t *testing.T) { - pro := 
createPro("../../../testdata/v2alpha2/pipelinerun1.json") + pro := createPro("../../../testdata/slsa-v2alpha2/pipelinerun1.json") got := PipelineRun(pro) @@ -108,11 +109,11 @@ func TestPipelineRun(t *testing.T) { } func TestTaskRun(t *testing.T) { - tr, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { t.Fatal(err) } - got := TaskRun(objects.NewTaskRunObject(tr)) + got := TaskRun(objects.NewTaskRunObjectV1Beta1(tr)) want := map[string]any{ "runSpec": v1beta1.TaskRunSpec{ diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/internal_parameters/internal_parameters.go b/pkg/chains/formats/slsa/v2alpha2/internal/internal_parameters/internal_parameters.go index 844588fb54..80ab28a493 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/internal_parameters/internal_parameters.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/internal_parameters/internal_parameters.go @@ -18,13 +18,13 @@ package internalparameters import ( "github.com/tektoncd/chains/pkg/chains/objects" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" ) // SLSAInternalParameters provides the chains config as internalparameters func SLSAInternalParameters(tko objects.TektonObject) map[string]any { internalParams := make(map[string]any) - if provenance := tko.GetProvenance(); provenance != (*v1beta1.Provenance)(nil) && provenance.FeatureFlags != nil { + if provenance := tko.GetProvenance(); provenance != (*v1.Provenance)(nil) && provenance.FeatureFlags != nil { internalParams["tekton-pipelines-feature-flags"] = *provenance.FeatureFlags } return internalParams @@ -33,7 +33,7 @@ func SLSAInternalParameters(tko objects.TektonObject) map[string]any { // TektonInternalParameters provides the chains config as well as annotations and labels func TektonInternalParameters(tko objects.TektonObject) 
map[string]any { internalParams := make(map[string]any) - if provenance := tko.GetProvenance(); provenance != (*v1beta1.Provenance)(nil) && provenance.FeatureFlags != nil { + if provenance := tko.GetProvenance(); provenance != (*v1.Provenance)(nil) && provenance.FeatureFlags != nil { internalParams["tekton-pipelines-feature-flags"] = *provenance.FeatureFlags } internalParams["labels"] = tko.GetLabels() diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/internal_parameters/internal_parameters_test.go b/pkg/chains/formats/slsa/v2alpha2/internal/internal_parameters/internal_parameters_test.go index 95cbee3cc0..25e1b48711 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/internal_parameters/internal_parameters_test.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/internal_parameters/internal_parameters_test.go @@ -26,11 +26,11 @@ import ( ) func TestTektonInternalParameters(t *testing.T) { - tr, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { t.Fatal(err) } - tro := objects.NewTaskRunObject(tr) + tro := objects.NewTaskRunObjectV1Beta1(tr) got := TektonInternalParameters(tro) want := map[string]any{ "labels": tro.GetLabels(), @@ -44,11 +44,11 @@ func TestTektonInternalParameters(t *testing.T) { } func TestSLSAInternalParameters(t *testing.T) { - tr, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { t.Fatal(err) } - tro := objects.NewTaskRunObject(tr) + tro := objects.NewTaskRunObjectV1Beta1(tr) got := SLSAInternalParameters(tro) want := map[string]any{ "tekton-pipelines-feature-flags": config.FeatureFlags{EnableAPIFields: "beta", ResultExtractionMethod: "termination-message"}, diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/pipelinerun/pipelinerun.go 
b/pkg/chains/formats/slsa/v2alpha2/internal/pipelinerun/pipelinerun.go index cab493d5f6..2820808555 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/pipelinerun/pipelinerun.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/pipelinerun/pipelinerun.go @@ -36,7 +36,7 @@ const ( ) // GenerateAttestation generates a provenance statement with SLSA v1.0 predicate for a pipeline run. -func GenerateAttestation(ctx context.Context, pro *objects.PipelineRunObject, slsaconfig *slsaconfig.SlsaConfig) (interface{}, error) { +func GenerateAttestation(ctx context.Context, pro *objects.PipelineRunObjectV1Beta1, slsaconfig *slsaconfig.SlsaConfig) (interface{}, error) { bp, err := byproducts(pro) if err != nil { return nil, err @@ -67,7 +67,7 @@ func GenerateAttestation(ctx context.Context, pro *objects.PipelineRunObject, sl return att, nil } -func metadata(pro *objects.PipelineRunObject) slsa.BuildMetadata { +func metadata(pro *objects.PipelineRunObjectV1Beta1) slsa.BuildMetadata { m := slsa.BuildMetadata{ InvocationID: string(pro.ObjectMeta.UID), } @@ -83,7 +83,7 @@ func metadata(pro *objects.PipelineRunObject) slsa.BuildMetadata { } // byproducts contains the pipelineRunResults -func byproducts(pro *objects.PipelineRunObject) ([]slsa.ResourceDescriptor, error) { +func byproducts(pro *objects.PipelineRunObjectV1Beta1) ([]slsa.ResourceDescriptor, error) { byProd := []slsa.ResourceDescriptor{} for _, key := range pro.Status.PipelineResults { content, err := json.Marshal(key.Value) @@ -101,7 +101,7 @@ func byproducts(pro *objects.PipelineRunObject) ([]slsa.ResourceDescriptor, erro } // getBuildDefinition get the buildDefinition based on the configured buildType. 
This will default to the slsa buildType -func getBuildDefinition(ctx context.Context, slsaconfig *slsaconfig.SlsaConfig, pro *objects.PipelineRunObject) (slsa.ProvenanceBuildDefinition, error) { +func getBuildDefinition(ctx context.Context, slsaconfig *slsaconfig.SlsaConfig, pro *objects.PipelineRunObjectV1Beta1) (slsa.ProvenanceBuildDefinition, error) { // if buildType is not set in the chains-config, default to slsa build type buildDefinitionType := slsaconfig.BuildType if slsaconfig.BuildType == "" { diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/pipelinerun/pipelinerun_test.go b/pkg/chains/formats/slsa/v2alpha2/internal/pipelinerun/pipelinerun_test.go index 34a12edc48..dfc75a4707 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/pipelinerun/pipelinerun_test.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/pipelinerun/pipelinerun_test.go @@ -63,7 +63,7 @@ func TestMetadata(t *testing.T) { StartedOn: &start, FinishedOn: &end, } - got := metadata(objects.NewPipelineRunObject(pr)) + got := metadata(objects.NewPipelineRunObjectV1Beta1(pr)) if d := cmp.Diff(want, got); d != "" { t.Fatalf("metadata (-want, +got):\n%s", d) } @@ -94,7 +94,7 @@ func TestMetadataInTimeZone(t *testing.T) { StartedOn: &start, FinishedOn: &end, } - got := metadata(objects.NewPipelineRunObject(pr)) + got := metadata(objects.NewPipelineRunObjectV1Beta1(pr)) if d := cmp.Diff(want, got); d != "" { t.Fatalf("metadata (-want, +got):\n%s", d) } @@ -126,7 +126,7 @@ func TestByProducts(t *testing.T) { MediaType: JsonMediaType, }, } - got, err := byproducts(objects.NewPipelineRunObject(pr)) + got, err := byproducts(objects.NewPipelineRunObjectV1Beta1(pr)) if err != nil { t.Fatalf("Could not extract byproducts: %s", err) } @@ -135,20 +135,20 @@ func TestByProducts(t *testing.T) { } } -func createPro(path string) *objects.PipelineRunObject { - pr, err := objectloader.PipelineRunFromFile(path) +func createPro(path string) *objects.PipelineRunObjectV1Beta1 { + pr, err := 
objectloader.PipelineRunV1Beta1FromFile(path) if err != nil { panic(err) } - tr1, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr1, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { panic(err) } - tr2, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun2.json") + tr2, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun2.json") if err != nil { panic(err) } - p := objects.NewPipelineRunObject(pr) + p := objects.NewPipelineRunObjectV1Beta1(pr) p.AppendTaskRun(tr1) p.AppendTaskRun(tr2) return p @@ -156,7 +156,7 @@ func createPro(path string) *objects.PipelineRunObject { func TestGenerateAttestation(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - pr := createPro("../../../testdata/v2alpha2/pipelinerun1.json") + pr := createPro("../../../testdata/slsa-v2alpha2/pipelinerun1.json") e1BuildStart := time.Unix(1617011400, 0) e1BuildFinished := time.Unix(1617011415, 0) @@ -275,8 +275,8 @@ func TestGenerateAttestation(t *testing.T) { } } -func getResolvedDependencies(addTasks func(*objects.TaskRunObject) (*v1resourcedescriptor.ResourceDescriptor, error)) []v1resourcedescriptor.ResourceDescriptor { //nolint:staticcheck - pr := createPro("../../../testdata/v2alpha2/pipelinerun1.json") +func getResolvedDependencies(addTasks func(*objects.TaskRunObjectV1Beta1) (*v1resourcedescriptor.ResourceDescriptor, error)) []v1resourcedescriptor.ResourceDescriptor { //nolint:staticcheck + pr := createPro("../../../testdata/slsa-v2alpha2/pipelinerun1.json") rd, err := resolveddependencies.PipelineRun(context.Background(), pr, &slsaconfig.SlsaConfig{DeepInspectionEnabled: false}, addTasks) if err != nil { return []v1resourcedescriptor.ResourceDescriptor{} @@ -285,7 +285,7 @@ func getResolvedDependencies(addTasks func(*objects.TaskRunObject) (*v1resourced } func TestGetBuildDefinition(t *testing.T) { - pr := 
createPro("../../../testdata/v2alpha2/pipelinerun1.json") + pr := createPro("../../../testdata/slsa-v2alpha2/pipelinerun1.json") pr.Annotations = map[string]string{ "annotation1": "annotation1", } @@ -294,7 +294,7 @@ func TestGetBuildDefinition(t *testing.T) { } tests := []struct { name string - taskContent func(*objects.TaskRunObject) (*v1resourcedescriptor.ResourceDescriptor, error) //nolint:staticcheck + taskContent func(*objects.TaskRunObjectV1Beta1) (*v1resourcedescriptor.ResourceDescriptor, error) //nolint:staticcheck config *slsaconfig.SlsaConfig want slsa.ProvenanceBuildDefinition }{ @@ -348,7 +348,7 @@ func TestGetBuildDefinition(t *testing.T) { } func TestUnsupportedBuildType(t *testing.T) { - pr := createPro("../../../testdata/v2alpha2/pipelinerun1.json") + pr := createPro("../../../testdata/slsa-v2alpha2/pipelinerun1.json") got, err := getBuildDefinition(context.Background(), &slsaconfig.SlsaConfig{BuildType: "bad-buildtype"}, pr) if err == nil { diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/resolved_dependencies/resolved_dependencies.go b/pkg/chains/formats/slsa/v2alpha2/internal/resolved_dependencies/resolved_dependencies.go index 54fb4e1454..38c8d6dce3 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/resolved_dependencies/resolved_dependencies.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/resolved_dependencies/resolved_dependencies.go @@ -22,7 +22,7 @@ import ( "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" v1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1" - "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/material" + materialv1beta1 "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/material/v1beta1" "github.com/tektoncd/chains/pkg/chains/formats/slsa/internal/slsaconfig" "github.com/tektoncd/chains/pkg/chains/objects" "go.uber.org/zap" @@ -44,11 +44,11 @@ const ( // used to toggle the fields in resolvedDependencies. 
see AddTektonTaskDescriptor // and AddSLSATaskDescriptor -type addTaskDescriptorContent func(*objects.TaskRunObject) (*v1.ResourceDescriptor, error) //nolint:staticcheck +type addTaskDescriptorContent func(*objects.TaskRunObjectV1Beta1) (*v1.ResourceDescriptor, error) //nolint:staticcheck // the more verbose resolved dependency content. this adds the name, uri, digest // and content if possible. -func AddTektonTaskDescriptor(tr *objects.TaskRunObject) (*v1.ResourceDescriptor, error) { //nolint:staticcheck +func AddTektonTaskDescriptor(tr *objects.TaskRunObjectV1Beta1) (*v1.ResourceDescriptor, error) { //nolint:staticcheck rd := v1.ResourceDescriptor{} storedTr, err := json.Marshal(tr) if err != nil { @@ -67,7 +67,7 @@ func AddTektonTaskDescriptor(tr *objects.TaskRunObject) (*v1.ResourceDescriptor, // resolved dependency content for the more generic slsa verifiers. just logs // the name, uri and digest. -func AddSLSATaskDescriptor(tr *objects.TaskRunObject) (*v1.ResourceDescriptor, error) { //nolint:staticcheck +func AddSLSATaskDescriptor(tr *objects.TaskRunObjectV1Beta1) (*v1.ResourceDescriptor, error) { //nolint:staticcheck if tr.Status.Provenance != nil && tr.Status.Provenance.RefSource != nil { return &v1.ResourceDescriptor{ Name: pipelineTaskConfigName, @@ -131,7 +131,7 @@ func removeDuplicateResolvedDependencies(resolvedDependencies []v1.ResourceDescr // fromPipelineTask adds the resolved dependencies from pipeline tasks // such as pipeline task uri/digest for remote pipeline tasks and step and sidecar images. 
-func fromPipelineTask(logger *zap.SugaredLogger, pro *objects.PipelineRunObject, addTasks addTaskDescriptorContent) ([]v1.ResourceDescriptor, error) { +func fromPipelineTask(logger *zap.SugaredLogger, pro *objects.PipelineRunObjectV1Beta1, addTasks addTaskDescriptorContent) ([]v1.ResourceDescriptor, error) { pSpec := pro.Status.PipelineSpec resolvedDependencies := []v1.ResourceDescriptor{} if pSpec != nil { @@ -156,14 +156,14 @@ func fromPipelineTask(logger *zap.SugaredLogger, pro *objects.PipelineRunObject, mats := []common.ProvenanceMaterial{} // add step images - stepMaterials, err := material.FromStepImages(tr) + stepMaterials, err := materialv1beta1.FromStepImages(tr) if err != nil { return nil, err } mats = append(mats, stepMaterials...) // add sidecar images - sidecarMaterials, err := material.FromSidecarImages(tr) + sidecarMaterials, err := materialv1beta1.FromSidecarImages(tr) if err != nil { return nil, err } @@ -177,30 +177,30 @@ func fromPipelineTask(logger *zap.SugaredLogger, pro *objects.PipelineRunObject, } // taskDependencies gather all dependencies in a task and adds them to resolvedDependencies -func taskDependencies(ctx context.Context, tr *objects.TaskRunObject) ([]v1.ResourceDescriptor, error) { +func taskDependencies(ctx context.Context, tr *objects.TaskRunObjectV1Beta1) ([]v1.ResourceDescriptor, error) { var resolvedDependencies []v1.ResourceDescriptor var err error mats := []common.ProvenanceMaterial{} // add step and sidecar images - stepMaterials, err := material.FromStepImages(tr) + stepMaterials, err := materialv1beta1.FromStepImages(tr) mats = append(mats, stepMaterials...) if err != nil { return nil, err } - sidecarMaterials, err := material.FromSidecarImages(tr) + sidecarMaterials, err := materialv1beta1.FromSidecarImages(tr) if err != nil { return nil, err } mats = append(mats, sidecarMaterials...) resolvedDependencies = append(resolvedDependencies, convertMaterialsToResolvedDependencies(mats, "")...) 
- mats = material.FromTaskParamsAndResults(ctx, tr) + mats = materialv1beta1.FromTaskParamsAndResults(ctx, tr) // convert materials to resolved dependencies resolvedDependencies = append(resolvedDependencies, convertMaterialsToResolvedDependencies(mats, inputResultName)...) // add task resources - mats = material.FromTaskResources(ctx, tr) + mats = materialv1beta1.FromTaskResources(ctx, tr) // convert materials to resolved dependencies resolvedDependencies = append(resolvedDependencies, convertMaterialsToResolvedDependencies(mats, pipelineResourceName)...) @@ -214,7 +214,7 @@ func taskDependencies(ctx context.Context, tr *objects.TaskRunObject) ([]v1.Reso } // TaskRun constructs `predicate.resolvedDependencies` section by collecting all the artifacts that influence a taskrun such as source code repo and step&sidecar base images. -func TaskRun(ctx context.Context, tro *objects.TaskRunObject) ([]v1.ResourceDescriptor, error) { +func TaskRun(ctx context.Context, tro *objects.TaskRunObjectV1Beta1) ([]v1.ResourceDescriptor, error) { var resolvedDependencies []v1.ResourceDescriptor var err error @@ -238,7 +238,7 @@ func TaskRun(ctx context.Context, tro *objects.TaskRunObject) ([]v1.ResourceDesc } // PipelineRun constructs `predicate.resolvedDependencies` section by collecting all the artifacts that influence a pipeline run such as source code repo and step&sidecar base images. 
-func PipelineRun(ctx context.Context, pro *objects.PipelineRunObject, slsaconfig *slsaconfig.SlsaConfig, addTasks addTaskDescriptorContent) ([]v1.ResourceDescriptor, error) { +func PipelineRun(ctx context.Context, pro *objects.PipelineRunObjectV1Beta1, slsaconfig *slsaconfig.SlsaConfig, addTasks addTaskDescriptorContent) ([]v1.ResourceDescriptor, error) { var err error var resolvedDependencies []v1.ResourceDescriptor logger := logging.FromContext(ctx) @@ -261,7 +261,7 @@ func PipelineRun(ctx context.Context, pro *objects.PipelineRunObject, slsaconfig resolvedDependencies = append(resolvedDependencies, rds...) // add resolved dependencies from pipeline results - mats := material.FromPipelineParamsAndResults(ctx, pro, slsaconfig) + mats := materialv1beta1.FromPipelineParamsAndResults(ctx, pro, slsaconfig) // convert materials to resolved dependencies resolvedDependencies = append(resolvedDependencies, convertMaterialsToResolvedDependencies(mats, inputResultName)...) diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/resolved_dependencies/resolved_dependencies_test.go b/pkg/chains/formats/slsa/v2alpha2/internal/resolved_dependencies/resolved_dependencies_test.go index d8013bdf07..42229e0a50 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/resolved_dependencies/resolved_dependencies_test.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/resolved_dependencies/resolved_dependencies_test.go @@ -38,29 +38,29 @@ import ( const digest = "sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b7" -var pro *objects.PipelineRunObject -var proStructuredResults *objects.PipelineRunObject +var pro *objects.PipelineRunObjectV1Beta1 +var proStructuredResults *objects.PipelineRunObjectV1Beta1 func init() { - pro = createPro("../../../testdata/v2alpha2/pipelinerun1.json") - proStructuredResults = createPro("../../../testdata/v2alpha2/pipelinerun_structured_results.json") + pro = createPro("../../../testdata/slsa-v2alpha2/pipelinerun1.json") + 
proStructuredResults = createPro("../../../testdata/slsa-v2alpha2/pipelinerun_structured_results.json") } -func createPro(path string) *objects.PipelineRunObject { +func createPro(path string) *objects.PipelineRunObjectV1Beta1 { var err error - pr, err := objectloader.PipelineRunFromFile(path) + pr, err := objectloader.PipelineRunV1Beta1FromFile(path) if err != nil { panic(err) } - tr1, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr1, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { panic(err) } - tr2, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun2.json") + tr2, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun2.json") if err != nil { panic(err) } - p := objects.NewPipelineRunObject(pr) + p := objects.NewPipelineRunObjectV1Beta1(pr) p.AppendTaskRun(tr1) p.AppendTaskRun(tr2) return p @@ -68,11 +68,11 @@ func createPro(path string) *objects.PipelineRunObject { func tektonTaskRuns() map[string][]byte { trs := make(map[string][]byte) - tr1, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr1, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { panic(err) } - tr2, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun2.json") + tr2, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun2.json") if err != nil { panic(err) } @@ -501,7 +501,7 @@ func TestTaskRun(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - rd, err := TaskRun(ctx, objects.NewTaskRunObject(tc.taskRun)) + rd, err := TaskRun(ctx, objects.NewTaskRunObjectV1Beta1(tc.taskRun)) if err != nil { t.Fatalf("Did not expect an error but got %v", err) } diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/taskrun/taskrun.go 
b/pkg/chains/formats/slsa/v2alpha2/internal/taskrun/taskrun.go index 9f53d253f0..fb813b0d67 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/taskrun/taskrun.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/taskrun/taskrun.go @@ -32,7 +32,7 @@ import ( const taskRunResults = "taskRunResults/%s" // GenerateAttestation generates a provenance statement with SLSA v1.0 predicate for a task run. -func GenerateAttestation(ctx context.Context, tro *objects.TaskRunObject, slsaConfig *slsaconfig.SlsaConfig) (interface{}, error) { +func GenerateAttestation(ctx context.Context, tro *objects.TaskRunObjectV1Beta1, slsaConfig *slsaconfig.SlsaConfig) (interface{}, error) { bp, err := byproducts(tro) if err != nil { return nil, err @@ -63,7 +63,7 @@ func GenerateAttestation(ctx context.Context, tro *objects.TaskRunObject, slsaCo return att, nil } -func metadata(tro *objects.TaskRunObject) slsa.BuildMetadata { +func metadata(tro *objects.TaskRunObjectV1Beta1) slsa.BuildMetadata { m := slsa.BuildMetadata{ InvocationID: string(tro.ObjectMeta.UID), } @@ -79,7 +79,7 @@ func metadata(tro *objects.TaskRunObject) slsa.BuildMetadata { } // byproducts contains the taskRunResults -func byproducts(tro *objects.TaskRunObject) ([]slsa.ResourceDescriptor, error) { +func byproducts(tro *objects.TaskRunObjectV1Beta1) ([]slsa.ResourceDescriptor, error) { byProd := []slsa.ResourceDescriptor{} for _, key := range tro.Status.TaskRunResults { content, err := json.Marshal(key.Value) @@ -97,7 +97,7 @@ func byproducts(tro *objects.TaskRunObject) ([]slsa.ResourceDescriptor, error) { } // getBuildDefinition get the buildDefinition based on the configured buildType. 
This will default to the slsa buildType -func getBuildDefinition(ctx context.Context, buildType string, tro *objects.TaskRunObject) (slsa.ProvenanceBuildDefinition, error) { +func getBuildDefinition(ctx context.Context, buildType string, tro *objects.TaskRunObjectV1Beta1) (slsa.ProvenanceBuildDefinition, error) { // if buildType is not set in the chains-config, default to slsa build type buildDefinitionType := buildType if buildType == "" { diff --git a/pkg/chains/formats/slsa/v2alpha2/internal/taskrun/taskrun_test.go b/pkg/chains/formats/slsa/v2alpha2/internal/taskrun/taskrun_test.go index 731d74a1cd..e7e277fd96 100644 --- a/pkg/chains/formats/slsa/v2alpha2/internal/taskrun/taskrun_test.go +++ b/pkg/chains/formats/slsa/v2alpha2/internal/taskrun/taskrun_test.go @@ -65,7 +65,7 @@ func TestMetadata(t *testing.T) { StartedOn: &start, FinishedOn: &end, } - got := metadata(objects.NewTaskRunObject(tr)) + got := metadata(objects.NewTaskRunObjectV1Beta1(tr)) if d := cmp.Diff(want, got); d != "" { t.Fatalf("metadata (-want, +got):\n%s", d) } @@ -96,7 +96,7 @@ func TestMetadataInTimeZone(t *testing.T) { StartedOn: &start, FinishedOn: &end, } - got := metadata(objects.NewTaskRunObject(tr)) + got := metadata(objects.NewTaskRunObjectV1Beta1(tr)) if d := cmp.Diff(want, got); d != "" { t.Fatalf("metadata (-want, +got):\n%s", d) } @@ -128,7 +128,7 @@ func TestByProducts(t *testing.T) { MediaType: pipelinerun.JsonMediaType, }, } - got, err := byproducts(objects.NewTaskRunObject(tr)) + got, err := byproducts(objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Fatalf("Could not extract byproducts: %s", err) } @@ -139,7 +139,7 @@ func TestByProducts(t *testing.T) { func TestTaskRunGenerateAttestation(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { t.Fatal(err) } @@ -225,7 
+225,7 @@ func TestTaskRunGenerateAttestation(t *testing.T) { }, } - got, err := GenerateAttestation(ctx, objects.NewTaskRunObject(tr), &slsaconfig.SlsaConfig{ + got, err := GenerateAttestation(ctx, objects.NewTaskRunObjectV1Beta1(tr), &slsaconfig.SlsaConfig{ BuilderID: "test_builder-1", BuildType: "https://tekton.dev/chains/v2/slsa", }) @@ -238,7 +238,7 @@ func TestTaskRunGenerateAttestation(t *testing.T) { } } -func getResolvedDependencies(tro *objects.TaskRunObject) []v1resourcedescriptor.ResourceDescriptor { +func getResolvedDependencies(tro *objects.TaskRunObjectV1Beta1) []v1resourcedescriptor.ResourceDescriptor { rd, err := resolveddependencies.TaskRun(context.Background(), tro) if err != nil { return []v1resourcedescriptor.ResourceDescriptor{} @@ -247,7 +247,7 @@ func getResolvedDependencies(tro *objects.TaskRunObject) []v1resourcedescriptor. } func TestGetBuildDefinition(t *testing.T) { - tr, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { t.Fatal(err) } @@ -259,7 +259,7 @@ func TestGetBuildDefinition(t *testing.T) { "label1": "label1", } - tro := objects.NewTaskRunObject(tr) + tro := objects.NewTaskRunObjectV1Beta1(tr) tests := []struct { name string buildType string @@ -317,12 +317,12 @@ func TestGetBuildDefinition(t *testing.T) { } func TestUnsupportedBuildType(t *testing.T) { - tr, err := objectloader.TaskRunFromFile("../../../testdata/v2alpha2/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../../../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { t.Fatal(err) } - got, err := getBuildDefinition(context.Background(), "bad-buildType", objects.NewTaskRunObject(tr)) + got, err := getBuildDefinition(context.Background(), "bad-buildType", objects.NewTaskRunObjectV1Beta1(tr)) if err == nil { t.Error("getBuildDefinition(): expected error got nil") } diff --git 
a/pkg/chains/formats/slsa/v2alpha2/slsav2.go b/pkg/chains/formats/slsa/v2alpha2/slsav2.go index 2368b459c5..561c191ff5 100644 --- a/pkg/chains/formats/slsa/v2alpha2/slsav2.go +++ b/pkg/chains/formats/slsa/v2alpha2/slsav2.go @@ -26,6 +26,8 @@ import ( "github.com/tektoncd/chains/pkg/chains/formats/slsa/v2alpha2/internal/taskrun" "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) const ( @@ -56,9 +58,32 @@ func (s *Slsa) Wrap() bool { func (s *Slsa) CreatePayload(ctx context.Context, obj interface{}) (interface{}, error) { switch v := obj.(type) { - case *objects.TaskRunObject: + case *objects.TaskRunObjectV1: + tro := obj.(*objects.TaskRunObjectV1) + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, tro.GetObject().(*v1.TaskRun)); err != nil { + return nil, fmt.Errorf("error converting Tekton TaskRun from version v1 to v1beta1: %s", err) + } + return taskrun.GenerateAttestation(ctx, objects.NewTaskRunObjectV1Beta1(trV1Beta1), s.slsaConfig) + case *objects.PipelineRunObjectV1: + pro := obj.(*objects.PipelineRunObjectV1) + prV1Beta1 := &v1beta1.PipelineRun{} //nolint:staticcheck + if err := prV1Beta1.ConvertFrom(ctx, pro.GetObject().(*v1.PipelineRun)); err != nil { + return nil, fmt.Errorf("error converting Tekton PipelineRun from version v1 to v1beta1: %s", err) + } + proV1Beta1 := objects.NewPipelineRunObjectV1Beta1(prV1Beta1) + trs := pro.GetTaskRuns() + for _, tr := range trs { + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, tr); err != nil { + return nil, fmt.Errorf("error converting Tekton TaskRun from version v1 to v1beta1: %s", err) + } + proV1Beta1.AppendTaskRun(trV1Beta1) + } + return pipelinerun.GenerateAttestation(ctx, proV1Beta1, s.slsaConfig) + case *objects.TaskRunObjectV1Beta1: return 
taskrun.GenerateAttestation(ctx, v, s.slsaConfig) - case *objects.PipelineRunObject: + case *objects.PipelineRunObjectV1Beta1: return pipelinerun.GenerateAttestation(ctx, v, s.slsaConfig) default: return nil, fmt.Errorf("intoto does not support type: %s", v) diff --git a/pkg/chains/formats/slsa/v2alpha2/slsav2_test.go b/pkg/chains/formats/slsa/v2alpha2/slsav2_test.go index 38c0107ca9..3f932350d1 100644 --- a/pkg/chains/formats/slsa/v2alpha2/slsav2_test.go +++ b/pkg/chains/formats/slsa/v2alpha2/slsav2_test.go @@ -96,7 +96,7 @@ func TestCorrectPayloadType(t *testing.T) { func TestTaskRunCreatePayload1(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../testdata/v2alpha2/taskrun1.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { t.Fatal(err) } @@ -187,7 +187,7 @@ func TestTaskRunCreatePayload1(t *testing.T) { i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) @@ -199,7 +199,7 @@ func TestTaskRunCreatePayload1(t *testing.T) { func TestTaskRunCreatePayload2(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := objectloader.TaskRunFromFile("../testdata/v2alpha2/taskrun2.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/slsa-v2alpha2/taskrun2.json") if err != nil { t.Fatal(err) } @@ -272,7 +272,7 @@ func TestTaskRunCreatePayload2(t *testing.T) { } i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) @@ -285,7 +285,7 @@ func TestTaskRunCreatePayload2(t *testing.T) { func TestMultipleSubjects(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - tr, err := 
objectloader.TaskRunFromFile("../testdata/v2alpha2/taskrun-multiple-subjects.json") + tr, err := objectloader.TaskRunV1Beta1FromFile("../testdata/slsa-v2alpha2/taskrun-multiple-subjects.json") if err != nil { t.Fatal(err) } @@ -352,7 +352,7 @@ func TestMultipleSubjects(t *testing.T) { } i, _ := NewFormatter(cfg) - got, err := i.CreatePayload(ctx, objects.NewTaskRunObject(tr)) + got, err := i.CreatePayload(ctx, objects.NewTaskRunObjectV1Beta1(tr)) if err != nil { t.Errorf("unexpected error: %s", err.Error()) } @@ -361,20 +361,20 @@ func TestMultipleSubjects(t *testing.T) { } } -func createPro(path string) *objects.PipelineRunObject { - pr, err := objectloader.PipelineRunFromFile(path) +func createPro(path string) *objects.PipelineRunObjectV1Beta1 { + pr, err := objectloader.PipelineRunV1Beta1FromFile(path) if err != nil { panic(err) } - tr1, err := objectloader.TaskRunFromFile("../testdata/v2alpha2/taskrun1.json") + tr1, err := objectloader.TaskRunV1Beta1FromFile("../testdata/slsa-v2alpha2/taskrun1.json") if err != nil { panic(err) } - tr2, err := objectloader.TaskRunFromFile("../testdata/v2alpha2/taskrun2.json") + tr2, err := objectloader.TaskRunV1Beta1FromFile("../testdata/slsa-v2alpha2/taskrun2.json") if err != nil { panic(err) } - p := objects.NewPipelineRunObject(pr) + p := objects.NewPipelineRunObjectV1Beta1(pr) p.AppendTaskRun(tr1) p.AppendTaskRun(tr2) return p @@ -383,7 +383,7 @@ func createPro(path string) *objects.PipelineRunObject { func TestPipelineRunCreatePayload1(t *testing.T) { ctx := logtesting.TestContextWithLogger(t) - pr := createPro("../testdata/v2alpha2/pipelinerun1.json") + pr := createPro("../testdata/slsa-v2alpha2/pipelinerun1.json") cfg := config.Config{ Builder: config.BuilderConfig{ diff --git a/pkg/chains/objects/objects.go b/pkg/chains/objects/objects.go index d89204af28..95319be9ae 100644 --- a/pkg/chains/objects/objects.go +++ b/pkg/chains/objects/objects.go @@ -19,7 +19,9 @@ import ( "fmt" "strings" + 
"github.com/tektoncd/pipeline/pkg/apis/config" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,8 +46,8 @@ type Object interface { // of Tekton operations. (eg. PipelineRun and TaskRun results) type Result struct { Name string - Type v1beta1.ResultsType - Value v1beta1.ParamValue + Type v1.ResultsType + Value v1.ParamValue } // Tekton object is an extended Kubernetes object with operations specific @@ -58,7 +60,7 @@ type TektonObject interface { GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) error GetResults() []Result - GetProvenance() *v1beta1.Provenance + GetProvenance() *v1.Provenance GetServiceAccountName() string GetPullSecrets() []string IsDone() bool @@ -66,69 +68,73 @@ type TektonObject interface { SupportsTaskRunArtifact() bool SupportsPipelineRunArtifact() bool SupportsOCIArtifact() bool - GetRemoteProvenance() *v1beta1.Provenance + GetRemoteProvenance() *v1.Provenance IsRemote() bool } func NewTektonObject(i interface{}) (TektonObject, error) { switch o := i.(type) { - case *v1beta1.PipelineRun: - return NewPipelineRunObject(o), nil - case *v1beta1.TaskRun: - return NewTaskRunObject(o), nil + case *v1.PipelineRun: + return NewPipelineRunObjectV1(o), nil + case *v1.TaskRun: + return NewTaskRunObjectV1(o), nil + case *v1beta1.PipelineRun: //nolint:staticcheck + return NewPipelineRunObjectV1Beta1(o), nil + case *v1beta1.TaskRun: //nolint:staticcheck + return NewTaskRunObjectV1Beta1(o), nil default: return nil, errors.New("unrecognized type when attempting to create tekton object") } } -// TaskRunObject extends v1beta1.TaskRun with additional functions. 
-type TaskRunObject struct { - *v1beta1.TaskRun +// TaskRunObjectV1 extends v1.TaskRun with additional functions. +type TaskRunObjectV1 struct { + *v1.TaskRun } -var _ TektonObject = &TaskRunObject{} +var _ TektonObject = &TaskRunObjectV1{} -func NewTaskRunObject(tr *v1beta1.TaskRun) *TaskRunObject { - return &TaskRunObject{ +func NewTaskRunObjectV1(tr *v1.TaskRun) *TaskRunObjectV1 { + return &TaskRunObjectV1{ tr, } } // Get the TaskRun GroupVersionKind -func (tro *TaskRunObject) GetGVK() string { +func (tro *TaskRunObjectV1) GetGVK() string { return fmt.Sprintf("%s/%s", tro.GetGroupVersionKind().GroupVersion().String(), tro.GetGroupVersionKind().Kind) } -func (tro *TaskRunObject) GetKindName() string { +func (tro *TaskRunObjectV1) GetKindName() string { return strings.ToLower(tro.GetGroupVersionKind().Kind) } -func (tro *TaskRunObject) GetProvenance() *v1beta1.Provenance { +func (tro *TaskRunObjectV1) GetProvenance() *v1.Provenance { return tro.Status.Provenance } // Get the latest annotations on the TaskRun -func (tro *TaskRunObject) GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) { - tr, err := clientSet.TektonV1beta1().TaskRuns(tro.Namespace).Get(ctx, tro.Name, metav1.GetOptions{}) +func (tro *TaskRunObjectV1) GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) { + tr, err := clientSet.TektonV1().TaskRuns(tro.Namespace).Get(ctx, tro.Name, metav1.GetOptions{}) return tr.Annotations, err } // Get the base TaskRun object -func (tro *TaskRunObject) GetObject() interface{} { +func (tro *TaskRunObjectV1) GetObject() interface{} { return tro.TaskRun } // Patch the original TaskRun object -func (tro *TaskRunObject) Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) error { - _, err := clientSet.TektonV1beta1().TaskRuns(tro.Namespace).Patch( +func (tro *TaskRunObjectV1) Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) 
error { + _, err := clientSet.TektonV1().TaskRuns(tro.Namespace).Patch( ctx, tro.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) return err } // Get the TaskRun results -func (tro *TaskRunObject) GetResults() []Result { +func (tro *TaskRunObjectV1) GetResults() []Result { res := []Result{} - for _, key := range tro.Status.TaskRunResults { + for _, key := range tro.Status.Results { res = append(res, Result{ Name: key.Name, Value: key.Value, @@ -137,7 +143,7 @@ func (tro *TaskRunObject) GetResults() []Result { return res } -func (tro *TaskRunObject) GetStepImages() []string { +func (tro *TaskRunObjectV1) GetStepImages() []string { images := []string{} for _, stepState := range tro.Status.Steps { images = append(images, stepState.ImageID) @@ -145,7 +151,7 @@ func (tro *TaskRunObject) GetStepImages() []string { return images } -func (tro *TaskRunObject) GetSidecarImages() []string { +func (tro *TaskRunObjectV1) GetSidecarImages() []string { images := []string{} for _, sidecarState := range tro.Status.Sidecars { images = append(images, sidecarState.ImageID) @@ -154,35 +160,35 @@ func (tro *TaskRunObject) GetSidecarImages() []string { } // Get the ServiceAccount declared in the TaskRun -func (tro *TaskRunObject) GetServiceAccountName() string { +func (tro *TaskRunObjectV1) GetServiceAccountName() string { return tro.Spec.ServiceAccountName } // Get the imgPullSecrets from the pod template -func (tro *TaskRunObject) GetPullSecrets() []string { +func (tro *TaskRunObjectV1) GetPullSecrets() []string { return getPodPullSecrets(tro.Spec.PodTemplate) } -func (tro *TaskRunObject) SupportsTaskRunArtifact() bool { +func (tro *TaskRunObjectV1) SupportsTaskRunArtifact() bool { return true } -func (tro *TaskRunObject) SupportsPipelineRunArtifact() bool { +func (tro *TaskRunObjectV1) SupportsPipelineRunArtifact() bool { return false } -func (tro *TaskRunObject) SupportsOCIArtifact() bool { +func (tro *TaskRunObjectV1) SupportsOCIArtifact() bool { return true } -func 
(tro *TaskRunObject) GetRemoteProvenance() *v1beta1.Provenance { +func (tro *TaskRunObjectV1) GetRemoteProvenance() *v1.Provenance { if t := tro.Status.Provenance; t != nil && t.RefSource != nil && tro.IsRemote() { return tro.Status.Provenance } return nil } -func (tro *TaskRunObject) IsRemote() bool { +func (tro *TaskRunObjectV1) IsRemote() bool { isRemoteTask := false if tro.Spec.TaskRef != nil { if tro.Spec.TaskRef.Resolver != "" && tro.Spec.TaskRef.Resolver != "Cluster" { @@ -192,57 +198,57 @@ func (tro *TaskRunObject) IsRemote() bool { return isRemoteTask } -// PipelineRunObject extends v1beta1.PipelineRun with additional functions. -type PipelineRunObject struct { +// PipelineRunObjectV1 extends v1.PipelineRun with additional functions. +type PipelineRunObjectV1 struct { // The base PipelineRun - *v1beta1.PipelineRun + *v1.PipelineRun // taskRuns that were apart of this PipelineRun - taskRuns []*v1beta1.TaskRun + taskRuns []*v1.TaskRun } -var _ TektonObject = &PipelineRunObject{} +var _ TektonObject = &PipelineRunObjectV1{} -func NewPipelineRunObject(pr *v1beta1.PipelineRun) *PipelineRunObject { - return &PipelineRunObject{ +func NewPipelineRunObjectV1(pr *v1.PipelineRun) *PipelineRunObjectV1 { + return &PipelineRunObjectV1{ PipelineRun: pr, } } // Get the PipelineRun GroupVersionKind -func (pro *PipelineRunObject) GetGVK() string { +func (pro *PipelineRunObjectV1) GetGVK() string { return fmt.Sprintf("%s/%s", pro.GetGroupVersionKind().GroupVersion().String(), pro.GetGroupVersionKind().Kind) } -func (pro *PipelineRunObject) GetKindName() string { +func (pro *PipelineRunObjectV1) GetKindName() string { return strings.ToLower(pro.GetGroupVersionKind().Kind) } // Request the current annotations on the PipelineRun object -func (pro *PipelineRunObject) GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) { - pr, err := clientSet.TektonV1beta1().PipelineRuns(pro.Namespace).Get(ctx, pro.Name, metav1.GetOptions{}) +func 
(pro *PipelineRunObjectV1) GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) { + pr, err := clientSet.TektonV1().PipelineRuns(pro.Namespace).Get(ctx, pro.Name, metav1.GetOptions{}) return pr.Annotations, err } // Get the base PipelineRun -func (pro *PipelineRunObject) GetObject() interface{} { +func (pro *PipelineRunObjectV1) GetObject() interface{} { return pro.PipelineRun } // Patch the original PipelineRun object -func (pro *PipelineRunObject) Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) error { - _, err := clientSet.TektonV1beta1().PipelineRuns(pro.Namespace).Patch( +func (pro *PipelineRunObjectV1) Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) error { + _, err := clientSet.TektonV1().PipelineRuns(pro.Namespace).Patch( ctx, pro.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) return err } -func (pro *PipelineRunObject) GetProvenance() *v1beta1.Provenance { +func (pro *PipelineRunObjectV1) GetProvenance() *v1.Provenance { return pro.Status.Provenance } // Get the resolved Pipelinerun results -func (pro *PipelineRunObject) GetResults() []Result { +func (pro *PipelineRunObjectV1) GetResults() []Result { res := []Result{} - for _, key := range pro.Status.PipelineResults { + for _, key := range pro.Status.Results { res = append(res, Result{ Name: key.Name, Value: key.Value, @@ -252,56 +258,61 @@ func (pro *PipelineRunObject) GetResults() []Result { } // Get the ServiceAccount declared in the PipelineRun -func (pro *PipelineRunObject) GetServiceAccountName() string { - return pro.Spec.ServiceAccountName +func (pro *PipelineRunObjectV1) GetServiceAccountName() string { + return pro.Spec.TaskRunTemplate.ServiceAccountName } // Get the ServiceAccount declared in the PipelineRun -func (pro *PipelineRunObject) IsSuccessful() bool { +func (pro *PipelineRunObjectV1) IsSuccessful() bool { return 
pro.Status.GetCondition(apis.ConditionSucceeded).IsTrue() } // Append TaskRuns to this PipelineRun -func (pro *PipelineRunObject) AppendTaskRun(tr *v1beta1.TaskRun) { +func (pro *PipelineRunObjectV1) AppendTaskRun(tr *v1.TaskRun) { pro.taskRuns = append(pro.taskRuns, tr) } +// Append TaskRuns to this PipelineRun +func (pro *PipelineRunObjectV1) GetTaskRuns() []*v1.TaskRun { //nolint:staticcheck + return pro.taskRuns +} + // Get the associated TaskRun via the Task name -func (pro *PipelineRunObject) GetTaskRunFromTask(taskName string) *TaskRunObject { +func (pro *PipelineRunObjectV1) GetTaskRunFromTask(taskName string) *TaskRunObjectV1 { for _, tr := range pro.taskRuns { val, ok := tr.Labels[PipelineTaskLabel] if ok && val == taskName { - return NewTaskRunObject(tr) + return NewTaskRunObjectV1(tr) } } return nil } // Get the imgPullSecrets from the pod template -func (pro *PipelineRunObject) GetPullSecrets() []string { - return getPodPullSecrets(pro.Spec.PodTemplate) +func (pro *PipelineRunObjectV1) GetPullSecrets() []string { + return getPodPullSecrets(pro.Spec.TaskRunTemplate.PodTemplate) } -func (pro *PipelineRunObject) SupportsTaskRunArtifact() bool { +func (pro *PipelineRunObjectV1) SupportsTaskRunArtifact() bool { return false } -func (pro *PipelineRunObject) SupportsPipelineRunArtifact() bool { +func (pro *PipelineRunObjectV1) SupportsPipelineRunArtifact() bool { return true } -func (pro *PipelineRunObject) SupportsOCIArtifact() bool { +func (pro *PipelineRunObjectV1) SupportsOCIArtifact() bool { return false } -func (pro *PipelineRunObject) GetRemoteProvenance() *v1beta1.Provenance { +func (pro *PipelineRunObjectV1) GetRemoteProvenance() *v1.Provenance { if p := pro.Status.Provenance; p != nil && p.RefSource != nil && pro.IsRemote() { return pro.Status.Provenance } return nil } -func (pro *PipelineRunObject) IsRemote() bool { +func (pro *PipelineRunObjectV1) IsRemote() bool { isRemotePipeline := false if pro.Spec.PipelineRef != nil { if 
pro.Spec.PipelineRef.Resolver != "" && pro.Spec.PipelineRef.Resolver != "Cluster" { @@ -321,3 +332,295 @@ func getPodPullSecrets(podTemplate *pod.Template) []string { } return imgPullSecrets } + +// PipelineRunObjectV1Beta1 extends v1.PipelineRun with additional functions. +type PipelineRunObjectV1Beta1 struct { + // The base PipelineRun + *v1beta1.PipelineRun + // taskRuns that were apart of this PipelineRun + taskRuns []*v1beta1.TaskRun //nolint:staticcheck +} + +var _ TektonObject = &PipelineRunObjectV1Beta1{} + +func NewPipelineRunObjectV1Beta1(pr *v1beta1.PipelineRun) *PipelineRunObjectV1Beta1 { //nolint:staticcheck + return &PipelineRunObjectV1Beta1{ + PipelineRun: pr, + } +} + +// Get the PipelineRun GroupVersionKind +func (pro *PipelineRunObjectV1Beta1) GetGVK() string { + return fmt.Sprintf("%s/%s", pro.GetGroupVersionKind().GroupVersion().String(), pro.GetGroupVersionKind().Kind) +} + +func (pro *PipelineRunObjectV1Beta1) GetKindName() string { + return strings.ToLower(pro.GetGroupVersionKind().Kind) +} + +// Request the current annotations on the PipelineRun object +func (pro *PipelineRunObjectV1Beta1) GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) { + pr, err := clientSet.TektonV1beta1().PipelineRuns(pro.Namespace).Get(ctx, pro.Name, metav1.GetOptions{}) + return pr.Annotations, err +} + +// Get the base PipelineRun +func (pro *PipelineRunObjectV1Beta1) GetObject() interface{} { + return pro.PipelineRun +} + +// Patch the original PipelineRun object +func (pro *PipelineRunObjectV1Beta1) Patch(ctx context.Context, clientSet versioned.Interface, patchBytes []byte) error { + _, err := clientSet.TektonV1beta1().PipelineRuns(pro.Namespace).Patch( + ctx, pro.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + return err +} + +func (pro *PipelineRunObjectV1Beta1) GetProvenance() *v1.Provenance { + var rs *v1.RefSource + if pro.Status.Provenance != nil && pro.Status.Provenance.RefSource != nil { 
+ rs = &v1.RefSource{ + URI: pro.Status.Provenance.RefSource.URI, + Digest: pro.Status.Provenance.RefSource.Digest, + EntryPoint: pro.Status.Provenance.RefSource.EntryPoint, + } + } else if pro.Status.Provenance != nil && pro.Status.Provenance.ConfigSource != nil { //nolint:staticcheck + rs = &v1.RefSource{ + URI: pro.Status.Provenance.ConfigSource.URI, //nolint:staticcheck + Digest: pro.Status.Provenance.ConfigSource.Digest, //nolint:staticcheck + EntryPoint: pro.Status.Provenance.ConfigSource.EntryPoint, //nolint:staticcheck + } + } + + var ff *config.FeatureFlags + if pro.Status.Provenance != nil { + ff = pro.Status.Provenance.FeatureFlags + } + + return &v1.Provenance{ + RefSource: rs, + FeatureFlags: ff, + } +} + +// Get the resolved Pipelinerun results +func (pro *PipelineRunObjectV1Beta1) GetResults() []Result { + res := []Result{} + for _, key := range pro.Status.PipelineResults { + res = append(res, Result{ + Name: key.Name, + Value: v1.ParamValue{ + ArrayVal: key.Value.ArrayVal, + ObjectVal: key.Value.ObjectVal, + StringVal: key.Value.StringVal, + Type: v1.ParamType(key.Value.Type), + }, + }) + } + return res +} + +// Get the ServiceAccount declared in the PipelineRun +func (pro *PipelineRunObjectV1Beta1) GetServiceAccountName() string { + return pro.Spec.ServiceAccountName +} + +// Get the ServiceAccount declared in the PipelineRun +func (pro *PipelineRunObjectV1Beta1) IsSuccessful() bool { + return pro.Status.GetCondition(apis.ConditionSucceeded).IsTrue() +} + +// Append TaskRuns to this PipelineRun +func (pro *PipelineRunObjectV1Beta1) AppendTaskRun(tr *v1beta1.TaskRun) { //nolint:staticcheck + pro.taskRuns = append(pro.taskRuns, tr) +} + +// Get the associated TaskRun via the Task name +func (pro *PipelineRunObjectV1Beta1) GetTaskRunFromTask(taskName string) *TaskRunObjectV1Beta1 { + for _, tr := range pro.taskRuns { + val, ok := tr.Labels[PipelineTaskLabel] + if ok && val == taskName { + return NewTaskRunObjectV1Beta1(tr) + } + } + return nil +} + 
+// Get the imgPullSecrets from the pod template +func (pro *PipelineRunObjectV1Beta1) GetPullSecrets() []string { + return getPodPullSecrets(pro.Spec.PodTemplate) +} + +func (pro *PipelineRunObjectV1Beta1) SupportsTaskRunArtifact() bool { + return false +} + +func (pro *PipelineRunObjectV1Beta1) SupportsPipelineRunArtifact() bool { + return true +} + +func (pro *PipelineRunObjectV1Beta1) SupportsOCIArtifact() bool { + return false +} + +func (pro *PipelineRunObjectV1Beta1) GetRemoteProvenance() *v1.Provenance { + if p := pro.Status.Provenance; p != nil && p.RefSource != nil && pro.IsRemote() { + return &v1.Provenance{ + RefSource: pro.GetProvenance().RefSource, + FeatureFlags: pro.GetProvenance().FeatureFlags, + } + } + return nil +} + +func (pro *PipelineRunObjectV1Beta1) IsRemote() bool { + isRemotePipeline := false + if pro.Spec.PipelineRef != nil { + if pro.Spec.PipelineRef.Resolver != "" && pro.Spec.PipelineRef.Resolver != "Cluster" { + isRemotePipeline = true + } + } + return isRemotePipeline +} + +// TaskRunObjectV1Beta1 extends v1beta1.TaskRun with additional functions. 
+type TaskRunObjectV1Beta1 struct { + *v1beta1.TaskRun +} + +var _ TektonObject = &TaskRunObjectV1Beta1{} + +func NewTaskRunObjectV1Beta1(tr *v1beta1.TaskRun) *TaskRunObjectV1Beta1 { //nolint:staticcheck + return &TaskRunObjectV1Beta1{ + tr, + } +} + +// Get the TaskRun GroupVersionKind +func (tro *TaskRunObjectV1Beta1) GetGVK() string { + return fmt.Sprintf("%s/%s", tro.GetGroupVersionKind().GroupVersion().String(), tro.GetGroupVersionKind().Kind) +} + +func (tro *TaskRunObjectV1Beta1) GetKindName() string { + return strings.ToLower(tro.GetGroupVersionKind().Kind) +} + +func (tro *TaskRunObjectV1Beta1) GetProvenance() *v1.Provenance { + var rs *v1.RefSource + if tro.Status.Provenance != nil && tro.Status.Provenance.RefSource != nil { + rs = &v1.RefSource{ + URI: tro.Status.Provenance.RefSource.URI, + Digest: tro.Status.Provenance.RefSource.Digest, + EntryPoint: tro.Status.Provenance.RefSource.EntryPoint, + } + } else if tro.Status.Provenance != nil && tro.Status.Provenance.ConfigSource != nil { //nolint:staticcheck + rs = &v1.RefSource{ + URI: tro.Status.Provenance.ConfigSource.URI, //nolint:staticcheck + Digest: tro.Status.Provenance.ConfigSource.Digest, //nolint:staticcheck + EntryPoint: tro.Status.Provenance.ConfigSource.EntryPoint, //nolint:staticcheck + } + } + + var ff *config.FeatureFlags + if tro.Status.Provenance != nil { + ff = tro.Status.Provenance.FeatureFlags + } + + return &v1.Provenance{ + RefSource: rs, + FeatureFlags: ff, + } +} + +// Get the latest annotations on the TaskRun +func (tro *TaskRunObjectV1Beta1) GetLatestAnnotations(ctx context.Context, clientSet versioned.Interface) (map[string]string, error) { + tr, err := clientSet.TektonV1beta1().TaskRuns(tro.Namespace).Get(ctx, tro.Name, metav1.GetOptions{}) + return tr.Annotations, err +} + +// Get the base TaskRun object +func (tro *TaskRunObjectV1Beta1) GetObject() interface{} { + return tro.TaskRun +} + +// Patch the original TaskRun object +func (tro *TaskRunObjectV1Beta1) Patch(ctx 
context.Context, clientSet versioned.Interface, patchBytes []byte) error { + _, err := clientSet.TektonV1beta1().TaskRuns(tro.Namespace).Patch( + ctx, tro.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}) + return err +} + +// Get the TaskRun results +func (tro *TaskRunObjectV1Beta1) GetResults() []Result { + res := []Result{} + for _, key := range tro.Status.TaskRunResults { + res = append(res, Result{ + Name: key.Name, + Value: v1.ParamValue{ + ArrayVal: key.Value.ArrayVal, + ObjectVal: key.Value.ObjectVal, + StringVal: key.Value.StringVal, + Type: v1.ParamType(key.Value.Type), + }, + }) + } + return res +} + +func (tro *TaskRunObjectV1Beta1) GetStepImages() []string { + images := []string{} + for _, stepState := range tro.Status.Steps { + images = append(images, stepState.ImageID) + } + return images +} + +func (tro *TaskRunObjectV1Beta1) GetSidecarImages() []string { + images := []string{} + for _, sidecarState := range tro.Status.Sidecars { + images = append(images, sidecarState.ImageID) + } + return images +} + +// Get the ServiceAccount declared in the TaskRun +func (tro *TaskRunObjectV1Beta1) GetServiceAccountName() string { + return tro.Spec.ServiceAccountName +} + +// Get the imgPullSecrets from the pod template +func (tro *TaskRunObjectV1Beta1) GetPullSecrets() []string { + return getPodPullSecrets(tro.Spec.PodTemplate) +} + +func (tro *TaskRunObjectV1Beta1) SupportsTaskRunArtifact() bool { + return true +} + +func (tro *TaskRunObjectV1Beta1) SupportsPipelineRunArtifact() bool { + return false +} + +func (tro *TaskRunObjectV1Beta1) SupportsOCIArtifact() bool { + return true +} + +func (tro *TaskRunObjectV1Beta1) GetRemoteProvenance() *v1.Provenance { + if t := tro.Status.Provenance; t != nil && t.RefSource != nil && tro.IsRemote() { + return &v1.Provenance{ + RefSource: tro.GetProvenance().RefSource, + FeatureFlags: tro.GetProvenance().FeatureFlags, + } + } + return nil +} + +func (tro *TaskRunObjectV1Beta1) IsRemote() bool { + isRemoteTask 
:= false + if tro.Spec.TaskRef != nil { + if tro.Spec.TaskRef.Resolver != "" && tro.Spec.TaskRef.Resolver != "Cluster" { + isRemoteTask = true + } + } + return isRemoteTask +} diff --git a/pkg/chains/objects/objects_test.go b/pkg/chains/objects/objects_test.go index 61e9817f3d..0ae36ff044 100644 --- a/pkg/chains/objects/objects_test.go +++ b/pkg/chains/objects/objects_test.go @@ -19,7 +19,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/tektoncd/pipeline/pkg/apis/pipeline/pod" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -38,8 +38,8 @@ func getEmptyTemplate() *pod.PodTemplate { return &pod.PodTemplate{} } -func getTaskRun() *v1beta1.TaskRun { - return &v1beta1.TaskRun{ +func getTaskRun() *v1.TaskRun { + return &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "objects-test", @@ -47,47 +47,47 @@ func getTaskRun() *v1beta1.TaskRun { PipelineTaskLabel: "foo-task", }, }, - Spec: v1beta1.TaskRunSpec{ + Spec: v1.TaskRunSpec{ ServiceAccountName: "taskrun-sa", - Params: []v1beta1.Param{ + Params: []v1.Param{ { Name: "runtime-param", - Value: *v1beta1.NewStructuredValues("runtime-value"), + Value: *v1.NewStructuredValues("runtime-value"), }, }, }, - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - Provenance: &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + Provenance: &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "https://github.com/tektoncd/chains", Digest: map[string]string{"sha1": "abcdef"}, EntryPoint: "pkg/chains/objects.go", }, }, - TaskSpec: &v1beta1.TaskSpec{ - Params: []v1beta1.ParamSpec{ + TaskSpec: &v1.TaskSpec{ + Params: []v1.ParamSpec{ { Name: "param1", - Default: v1beta1.NewStructuredValues("default-value"), + Default: 
v1.NewStructuredValues("default-value"), }, }, }, - TaskRunResults: []v1beta1.TaskRunResult{ + Results: []v1.TaskRunResult{ { Name: "img1_input_ARTIFACT_INPUTS", - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": "sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b7", }), }, - {Name: "mvn1_ARTIFACT_URI", Value: *v1beta1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, - {Name: "mvn1_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues("sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, + {Name: "mvn1_ARTIFACT_URI", Value: *v1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, + {Name: "mvn1_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues("sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, }, - Steps: []v1beta1.StepState{{ + Steps: []v1.StepState{{ ImageID: "step-image", }}, - Sidecars: []v1beta1.SidecarState{{ + Sidecars: []v1.SidecarState{{ ImageID: "sidecar-image", }}, }, @@ -95,48 +95,50 @@ func getTaskRun() *v1beta1.TaskRun { } } -func getPipelineRun() *v1beta1.PipelineRun { - return &v1beta1.PipelineRun{ +func getPipelineRun() *v1.PipelineRun { + return &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "objects-test", }, - Spec: v1beta1.PipelineRunSpec{ - ServiceAccountName: "pipelinerun-sa", - Params: []v1beta1.Param{ + Spec: v1.PipelineRunSpec{ + TaskRunTemplate: v1.PipelineTaskRunTemplate{ + ServiceAccountName: "pipelinerun-sa", + }, + Params: []v1.Param{ { Name: "runtime-param", - Value: *v1beta1.NewStructuredValues("runtime-value"), + Value: *v1.NewStructuredValues("runtime-value"), }, }, }, - Status: v1beta1.PipelineRunStatus{ - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - Provenance: &v1beta1.Provenance{ - 
RefSource: &v1beta1.RefSource{ + Status: v1.PipelineRunStatus{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + Provenance: &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "https://github.com/tektoncd/chains", Digest: map[string]string{"sha1": "abcdef"}, EntryPoint: "pkg/chains/objects.go", }, }, - PipelineSpec: &v1beta1.PipelineSpec{ - Params: []v1beta1.ParamSpec{ + PipelineSpec: &v1.PipelineSpec{ + Params: []v1.ParamSpec{ { Name: "param1", - Default: v1beta1.NewStructuredValues("default-value"), + Default: v1.NewStructuredValues("default-value"), }, }, }, - PipelineResults: []v1beta1.PipelineRunResult{ + Results: []v1.PipelineRunResult{ { Name: "img1_input_ARTIFACT_INPUTS", - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": "sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b7", }), }, - {Name: "mvn1_ARTIFACT_URI", Value: *v1beta1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, - {Name: "mvn1_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues("sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, + {Name: "mvn1_ARTIFACT_URI", Value: *v1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, + {Name: "mvn1_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues("sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, }, }, }, @@ -170,7 +172,7 @@ func TestTaskRun_ImagePullSecrets(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tr := NewTaskRunObject(getTaskRun()) + tr := NewTaskRunObjectV1(getTaskRun()) tr.Spec.PodTemplate = tt.template secret := tr.GetPullSecrets() assert.ElementsMatch(t, secret, tt.want) @@ -206,8 +208,8 @@ func TestPipelineRun_ImagePullSecrets(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t 
*testing.T) { - pr := NewPipelineRunObject(getPipelineRun()) - pr.Spec.PodTemplate = tt.template + pr := NewPipelineRunObjectV1(getPipelineRun()) + pr.Spec.TaskRunTemplate.PodTemplate = tt.template secret := pr.GetPullSecrets() assert.ElementsMatch(t, secret, tt.want) }) @@ -217,10 +219,10 @@ func TestPipelineRun_ImagePullSecrets(t *testing.T) { func TestPipelineRun_GetProvenance(t *testing.T) { t.Run("TestPipelineRun_GetProvenance", func(t *testing.T) { - pr := NewPipelineRunObject(getPipelineRun()) + pr := NewPipelineRunObjectV1(getPipelineRun()) got := pr.GetProvenance() - want := &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + want := &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "https://github.com/tektoncd/chains", Digest: map[string]string{"sha1": "abcdef"}, EntryPoint: "pkg/chains/objects.go", @@ -236,10 +238,10 @@ func TestPipelineRun_GetProvenance(t *testing.T) { func TestTaskRun_GetProvenance(t *testing.T) { t.Run("TestTaskRun_GetProvenance", func(t *testing.T) { - tr := NewTaskRunObject(getTaskRun()) + tr := NewTaskRunObjectV1(getTaskRun()) got := tr.GetProvenance() - want := &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + want := &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "https://github.com/tektoncd/chains", Digest: map[string]string{"sha1": "abcdef"}, EntryPoint: "pkg/chains/objects.go", @@ -255,18 +257,18 @@ func TestTaskRun_GetProvenance(t *testing.T) { func TestPipelineRun_GetResults(t *testing.T) { t.Run("TestPipelineRun_GetResults", func(t *testing.T) { - pr := NewPipelineRunObject(getPipelineRun()) + pr := NewPipelineRunObjectV1(getPipelineRun()) got := pr.GetResults() assert.ElementsMatch(t, got, []Result{ - { + Result{ Name: "img1_input_ARTIFACT_INPUTS", - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": "sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b7", }), }, - {Name: "mvn1_ARTIFACT_URI", Value: 
*v1beta1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, - {Name: "mvn1_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues("sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, + Result{Name: "mvn1_ARTIFACT_URI", Value: *v1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, + Result{Name: "mvn1_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues("sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, }) }) @@ -275,7 +277,7 @@ func TestPipelineRun_GetResults(t *testing.T) { func TestTaskRun_GetStepImages(t *testing.T) { t.Run("TestTaskRun_GetStepImages", func(t *testing.T) { - tr := NewTaskRunObject(getTaskRun()) + tr := NewTaskRunObjectV1(getTaskRun()) got := tr.GetStepImages() want := []string{"step-image"} if d := cmp.Diff(want, got); d != "" { @@ -288,7 +290,7 @@ func TestTaskRun_GetStepImages(t *testing.T) { func TestTaskRun_GetSidecarImages(t *testing.T) { t.Run("TestTaskRun_GetSidecarImages", func(t *testing.T) { - tr := NewTaskRunObject(getTaskRun()) + tr := NewTaskRunObjectV1(getTaskRun()) got := tr.GetSidecarImages() want := []string{"sidecar-image"} if d := cmp.Diff(want, got); d != "" { @@ -301,55 +303,55 @@ func TestTaskRun_GetSidecarImages(t *testing.T) { func TestTaskRun_GetResults(t *testing.T) { t.Run("TestTaskRun_GetResults", func(t *testing.T) { - pr := NewTaskRunObject(getTaskRun()) + pr := NewTaskRunObjectV1(getTaskRun()) got := pr.GetResults() assert.ElementsMatch(t, got, []Result{ - { + Result{ Name: "img1_input_ARTIFACT_INPUTS", - Value: *v1beta1.NewObject(map[string]string{ + Value: *v1.NewObject(map[string]string{ "uri": "gcr.io/foo/bar", "digest": "sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b7", }), }, - {Name: "mvn1_ARTIFACT_URI", Value: 
*v1beta1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, - {Name: "mvn1_ARTIFACT_DIGEST", Value: *v1beta1.NewStructuredValues("sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, + Result{Name: "mvn1_ARTIFACT_URI", Value: *v1.NewStructuredValues("projects/test-project/locations/us-west4/repositories/test-repo/mavenArtifacts/com.google.guava:guava:31.0-jre")}, + Result{Name: "mvn1_ARTIFACT_DIGEST", Value: *v1.NewStructuredValues("sha256:05f95b26ed10668b7183c1e2da98610e91372fa9f510046d4ce5812addad86b5")}, }) }) } func TestPipelineRun_GetGVK(t *testing.T) { - assert.Equal(t, "tekton.dev/v1beta1/PipelineRun", NewPipelineRunObject(getPipelineRun()).GetGVK()) + assert.Equal(t, "tekton.dev/v1/PipelineRun", NewPipelineRunObjectV1(getPipelineRun()).GetGVK()) } func TestTaskRun_GetGVK(t *testing.T) { - assert.Equal(t, "tekton.dev/v1beta1/TaskRun", NewTaskRunObject(getTaskRun()).GetGVK()) + assert.Equal(t, "tekton.dev/v1/TaskRun", NewTaskRunObjectV1(getTaskRun()).GetGVK()) } func TestPipelineRun_GetKindName(t *testing.T) { - assert.Equal(t, "pipelinerun", NewPipelineRunObject(getPipelineRun()).GetKindName()) + assert.Equal(t, "pipelinerun", NewPipelineRunObjectV1(getPipelineRun()).GetKindName()) } func TestTaskRun_GetKindName(t *testing.T) { - assert.Equal(t, "taskrun", NewTaskRunObject(getTaskRun()).GetKindName()) + assert.Equal(t, "taskrun", NewTaskRunObjectV1(getTaskRun()).GetKindName()) } func TestPipelineRun_GetServiceAccountName(t *testing.T) { - assert.Equal(t, "pipelinerun-sa", NewPipelineRunObject(getPipelineRun()).GetServiceAccountName()) + assert.Equal(t, "pipelinerun-sa", NewPipelineRunObjectV1(getPipelineRun()).GetServiceAccountName()) } func TestTaskRun_GetServiceAccountName(t *testing.T) { - assert.Equal(t, "taskrun-sa", NewTaskRunObject(getTaskRun()).GetServiceAccountName()) + assert.Equal(t, "taskrun-sa", 
NewTaskRunObjectV1(getTaskRun()).GetServiceAccountName()) } func TestNewTektonObject(t *testing.T) { tro, err := NewTektonObject(getTaskRun()) assert.NoError(t, err) - assert.IsType(t, &TaskRunObject{}, tro) + assert.IsType(t, &TaskRunObjectV1{}, tro) pro, err := NewTektonObject(getPipelineRun()) assert.NoError(t, err) - assert.IsType(t, &PipelineRunObject{}, pro) + assert.IsType(t, &PipelineRunObjectV1{}, pro) unknown, err := NewTektonObject("someting-else") assert.Nil(t, unknown) @@ -357,7 +359,7 @@ func TestNewTektonObject(t *testing.T) { } func TestPipelineRun_GetTaskRunFromTask(t *testing.T) { - pro := NewPipelineRunObject(getPipelineRun()) + pro := NewPipelineRunObjectV1(getPipelineRun()) assert.Nil(t, pro.GetTaskRunFromTask("missing")) assert.Nil(t, pro.GetTaskRunFromTask("foo-task")) @@ -369,14 +371,14 @@ func TestPipelineRun_GetTaskRunFromTask(t *testing.T) { } func TestProvenanceExists(t *testing.T) { - pro := NewPipelineRunObject(getPipelineRun()) - provenance := &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + pro := NewPipelineRunObjectV1(getPipelineRun()) + provenance := &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "tekton.com", }, } - pro.Status.Provenance = &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + pro.Status.Provenance = &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "tekton.com", }, } @@ -384,14 +386,14 @@ func TestProvenanceExists(t *testing.T) { } func TestPipelineRunRemoteProvenance(t *testing.T) { - pro := NewPipelineRunObject(getPipelineRun()) - provenance := &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + pro := NewPipelineRunObjectV1(getPipelineRun()) + provenance := &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "tekton.com", }, } - pro.Status.Provenance = &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + pro.Status.Provenance = &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "tekton.com", }, } @@ -399,14 +401,14 @@ func TestPipelineRunRemoteProvenance(t *testing.T) { } func 
TestTaskRunRemoteProvenance(t *testing.T) { - tro := NewTaskRunObject(getTaskRun()) - provenance := &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + tro := NewTaskRunObjectV1(getTaskRun()) + provenance := &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "tekton.com", }, } - tro.Status.Provenance = &v1beta1.Provenance{ - RefSource: &v1beta1.RefSource{ + tro.Status.Provenance = &v1.Provenance{ + RefSource: &v1.RefSource{ URI: "tekton.com", }, } @@ -414,9 +416,9 @@ func TestTaskRunRemoteProvenance(t *testing.T) { } func TestPipelineRunIsRemote(t *testing.T) { - pro := NewPipelineRunObject(getPipelineRun()) - pro.Spec.PipelineRef = &v1beta1.PipelineRef{ - ResolverRef: v1beta1.ResolverRef{ + pro := NewPipelineRunObjectV1(getPipelineRun()) + pro.Spec.PipelineRef = &v1.PipelineRef{ + ResolverRef: v1.ResolverRef{ Resolver: "Bundle", }, } @@ -424,9 +426,9 @@ func TestPipelineRunIsRemote(t *testing.T) { } func TestTaskRunIsRemote(t *testing.T) { - tro := NewTaskRunObject(getTaskRun()) - tro.Spec.TaskRef = &v1beta1.TaskRef{ - ResolverRef: v1beta1.ResolverRef{ + tro := NewTaskRunObjectV1(getTaskRun()) + tro.Spec.TaskRef = &v1.TaskRef{ + ResolverRef: v1.ResolverRef{ Resolver: "Bundle", }, } diff --git a/pkg/chains/rekor_test.go b/pkg/chains/rekor_test.go index cbc8075c93..ce53fdc1bf 100644 --- a/pkg/chains/rekor_test.go +++ b/pkg/chains/rekor_test.go @@ -18,8 +18,8 @@ import ( "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func TestShouldUploadTlog(t *testing.T) { @@ -77,13 +77,13 @@ func TestShouldUploadTlog(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - tr := &v1beta1.TaskRun{ - ObjectMeta: v1.ObjectMeta{ + tr := &v1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ Annotations: 
test.annotations, }, } cfg := config.Config{Transparency: test.cfg} - trObj := objects.NewTaskRunObject(tr) + trObj := objects.NewTaskRunObjectV1(tr) got := shouldUploadTlog(cfg, trObj) if got != test.expected { t.Fatalf("got (%v) doesn't match expected (%v)", got, test.expected) diff --git a/pkg/chains/signing_test.go b/pkg/chains/signing_test.go index 4660a04c57..0a8cba2f09 100644 --- a/pkg/chains/signing_test.go +++ b/pkg/chains/signing_test.go @@ -26,7 +26,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/storage" "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/chains/pkg/test/tekton" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" @@ -40,13 +40,13 @@ func TestSigner_Sign(t *testing.T) { // - generates payloads // - stores them in the configured systems // - marks the object as signed - tro := objects.NewTaskRunObject(&v1beta1.TaskRun{ + tro := objects.NewTaskRunObjectV1(&v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", }, }) - pro := objects.NewPipelineRunObject(&v1beta1.PipelineRun{ + pro := objects.NewPipelineRunObjectV1(&v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", }, @@ -186,14 +186,14 @@ func TestSigner_Sign(t *testing.T) { func TestSigner_Transparency(t *testing.T) { newTaskRun := func(name string) objects.TektonObject { - return objects.NewTaskRunObject(&v1beta1.TaskRun{ + return objects.NewTaskRunObjectV1(&v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, }) } newPipelineRun := func(name string) objects.TektonObject { - return objects.NewPipelineRunObject(&v1beta1.PipelineRun{ + return objects.NewPipelineRunObjectV1(&v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: name, }, @@ -202,12 +202,12 @@ func TestSigner_Transparency(t *testing.T) { setAnnotation := func(obj 
objects.TektonObject, key, value string) { // TODO: opportunity to add code reuse switch o := obj.GetObject().(type) { - case *v1beta1.PipelineRun: + case *v1.PipelineRun: if o.Annotations == nil { o.Annotations = make(map[string]string) } o.Annotations[key] = value - case *v1beta1.TaskRun: + case *v1.TaskRun: if o.Annotations == nil { o.Annotations = make(map[string]string) } diff --git a/pkg/chains/storage/docdb/docdb_test.go b/pkg/chains/storage/docdb/docdb_test.go index 95e0386590..9ac4f6ae2f 100644 --- a/pkg/chains/storage/docdb/docdb_test.go +++ b/pkg/chains/storage/docdb/docdb_test.go @@ -43,7 +43,7 @@ func TestBackend_StorePayload(t *testing.T) { { name: "no error", args: args{ - rawPayload: &v1beta1.TaskRun{ObjectMeta: metav1.ObjectMeta{UID: "foo"}}, + rawPayload: &v1beta1.TaskRun{ObjectMeta: metav1.ObjectMeta{UID: "foo"}}, //nolint:staticcheck signature: "signature", key: "foo", }, @@ -51,7 +51,7 @@ func TestBackend_StorePayload(t *testing.T) { { name: "no error - PipelineRun", args: args{ - rawPayload: &v1beta1.PipelineRun{ObjectMeta: metav1.ObjectMeta{UID: "foo"}}, + rawPayload: &v1beta1.PipelineRun{ObjectMeta: metav1.ObjectMeta{UID: "foo"}}, //nolint:staticcheck signature: "signature", key: "moo", }, diff --git a/pkg/chains/storage/gcs/gcs.go b/pkg/chains/storage/gcs/gcs.go index 1c42406dc6..b4167c534e 100644 --- a/pkg/chains/storage/gcs/gcs.go +++ b/pkg/chains/storage/gcs/gcs.go @@ -26,6 +26,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/signing" "github.com/tektoncd/chains/pkg/chains/storage/api" "github.com/tektoncd/chains/pkg/config" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) @@ -62,19 +63,20 @@ func NewStorageBackend(ctx context.Context, cfg config.Config) (*Backend, error) }, nil } -// StorePayload implements the storage.Backend interface. +// StorePayload implements the storage.Backend interface. 
As of chains v0.20.0+, +// this method has been updated to use Tekton v1 objects (previously v1beta1) and +// its error messages have been updated to reflect this. // //nolint:staticcheck func (b *Backend) StorePayload(ctx context.Context, obj objects.TektonObject, rawPayload []byte, signature string, opts config.StorageOpts) error { logger := logging.FromContext(ctx) - if tr, isTaskRun := obj.GetObject().(*v1beta1.TaskRun); isTaskRun { + if tr, isTaskRun := obj.GetObject().(*v1.TaskRun); isTaskRun { store := &TaskRunStorer{ writer: b.writer, key: opts.ShortKey, } - // TODO(https://github.com/tektoncd/chains/issues/665) currently using deprecated v1beta1 APIs until we add full v1 support - if _, err := store.Store(ctx, &api.StoreRequest[*v1beta1.TaskRun, *in_toto.Statement]{ + if _, err := store.Store(ctx, &api.StoreRequest[*v1.TaskRun, *in_toto.Statement]{ Object: obj, Artifact: tr, // We don't actually use payload - we store the raw bundle values directly. @@ -89,13 +91,13 @@ func (b *Backend) StorePayload(ctx context.Context, obj objects.TektonObject, ra logger.Errorf("error writing to GCS: %w", err) return err } - } else if pr, isPipelineRun := obj.GetObject().(*v1beta1.PipelineRun); isPipelineRun { + } else if pr, isPipelineRun := obj.GetObject().(*v1.PipelineRun); isPipelineRun { store := &PipelineRunStorer{ writer: b.writer, key: opts.ShortKey, } // TODO(https://github.com/tektoncd/chains/issues/665) currently using deprecated v1beta1 APIs until we add full v1 support - if _, err := store.Store(ctx, &api.StoreRequest[*v1beta1.PipelineRun, *in_toto.Statement]{ + if _, err := store.Store(ctx, &api.StoreRequest[*v1.PipelineRun, *in_toto.Statement]{ Object: obj, Artifact: pr, // We don't actually use payload - we store the raw bundle values directly. 
@@ -111,7 +113,7 @@ func (b *Backend) StorePayload(ctx context.Context, obj objects.TektonObject, ra return err } } else { - return fmt.Errorf("type %T not supported - supported types: [*v1beta1.TaskRun, *v1beta1.PipelineRun]", obj.GetObject()) + return fmt.Errorf("type %T not supported - supported types: [*v1.TaskRun, *v1.PipelineRun]", obj.GetObject()) } return nil } @@ -151,10 +153,14 @@ func (b *Backend) RetrieveSignatures(ctx context.Context, obj objects.TektonObje var object string switch t := obj.GetObject().(type) { + case *v1.TaskRun: + object = taskRunSigNameV1(t, opts) + case *v1.PipelineRun: + object = pipelineRunSignameV1(t, opts) case *v1beta1.TaskRun: - object = taskRunSigName(t, opts) + object = taskRunSigNameV1Beta1(t, opts) case *v1beta1.PipelineRun: - object = pipelineRunSigname(t, opts) + object = pipelineRunSignameV1Beta1(t, opts) default: return nil, fmt.Errorf("unsupported TektonObject type: %T", t) } @@ -174,10 +180,14 @@ func (b *Backend) RetrievePayloads(ctx context.Context, obj objects.TektonObject var object string switch t := obj.GetObject().(type) { + case *v1.TaskRun: + object = taskRunPayloadNameV1(t, opts) + case *v1.PipelineRun: + object = pipelineRunPayloadNameV1(t, opts) case *v1beta1.TaskRun: - object = taskRunPayloadName(t, opts) + object = taskRunPayloadNameV1Beta1(t, opts) case *v1beta1.PipelineRun: - object = pipelineRunPayloadName(t, opts) + object = pipelineRunPayloadNameV1Beta1(t, opts) default: return nil, fmt.Errorf("unsupported TektonObject type: %T", t) } @@ -207,29 +217,49 @@ func (b *Backend) retrieveObject(ctx context.Context, object string) (string, er } //nolint:staticcheck -func taskRunSigName(tr *v1beta1.TaskRun, opts config.StorageOpts) string { +func taskRunSigNameV1(tr *v1.TaskRun, opts config.StorageOpts) string { + return fmt.Sprintf(SignatureNameFormatTaskRun, tr.Namespace, tr.Name, opts.ShortKey) +} + +//nolint:staticcheck +func taskRunPayloadNameV1(tr *v1.TaskRun, opts config.StorageOpts) string { + 
return fmt.Sprintf(PayloadNameFormatTaskRun, tr.Namespace, tr.Name, opts.ShortKey) +} + +//nolint:staticcheck +func pipelineRunSignameV1(pr *v1.PipelineRun, opts config.StorageOpts) string { + return fmt.Sprintf(SignatureNameFormatPipelineRun, pr.Namespace, pr.Name, opts.ShortKey) +} + +//nolint:staticcheck +func pipelineRunPayloadNameV1(pr *v1.PipelineRun, opts config.StorageOpts) string { + return fmt.Sprintf(PayloadNameFormatPipelineRun, pr.Namespace, pr.Name, opts.ShortKey) +} + +//nolint:staticcheck +func taskRunSigNameV1Beta1(tr *v1beta1.TaskRun, opts config.StorageOpts) string { return fmt.Sprintf(SignatureNameFormatTaskRun, tr.Namespace, tr.Name, opts.ShortKey) } //nolint:staticcheck -func taskRunPayloadName(tr *v1beta1.TaskRun, opts config.StorageOpts) string { +func taskRunPayloadNameV1Beta1(tr *v1beta1.TaskRun, opts config.StorageOpts) string { return fmt.Sprintf(PayloadNameFormatTaskRun, tr.Namespace, tr.Name, opts.ShortKey) } //nolint:staticcheck -func pipelineRunSigname(pr *v1beta1.PipelineRun, opts config.StorageOpts) string { +func pipelineRunSignameV1Beta1(pr *v1beta1.PipelineRun, opts config.StorageOpts) string { return fmt.Sprintf(SignatureNameFormatPipelineRun, pr.Namespace, pr.Name, opts.ShortKey) } //nolint:staticcheck -func pipelineRunPayloadName(pr *v1beta1.PipelineRun, opts config.StorageOpts) string { +func pipelineRunPayloadNameV1Beta1(pr *v1beta1.PipelineRun, opts config.StorageOpts) string { return fmt.Sprintf(PayloadNameFormatPipelineRun, pr.Namespace, pr.Name, opts.ShortKey) } //nolint:staticcheck var ( - _ api.Storer[*v1beta1.TaskRun, *in_toto.Statement] = &TaskRunStorer{} - _ api.Storer[*v1beta1.PipelineRun, *in_toto.Statement] = &PipelineRunStorer{} + _ api.Storer[*v1.TaskRun, *in_toto.Statement] = &TaskRunStorer{} + _ api.Storer[*v1.PipelineRun, *in_toto.Statement] = &PipelineRunStorer{} ) // TaskRunStorer stores TaskRuns in GCS. 
@@ -244,7 +274,7 @@ type TaskRunStorer struct { // Store stores the TaskRun chains information in GCS // //nolint:staticcheck -func (s *TaskRunStorer) Store(ctx context.Context, req *api.StoreRequest[*v1beta1.TaskRun, *in_toto.Statement]) (*api.StoreResponse, error) { +func (s *TaskRunStorer) Store(ctx context.Context, req *api.StoreRequest[*v1.TaskRun, *in_toto.Statement]) (*api.StoreResponse, error) { tr := req.Artifact key := s.key if key == "" { @@ -268,7 +298,7 @@ type PipelineRunStorer struct { // Store stores the PipelineRun chains information in GCS // //nolint:staticcheck -func (s *PipelineRunStorer) Store(ctx context.Context, req *api.StoreRequest[*v1beta1.PipelineRun, *in_toto.Statement]) (*api.StoreResponse, error) { +func (s *PipelineRunStorer) Store(ctx context.Context, req *api.StoreRequest[*v1.PipelineRun, *in_toto.Statement]) (*api.StoreResponse, error) { pr := req.Artifact key := s.key if key == "" { diff --git a/pkg/chains/storage/gcs/gcs_test.go b/pkg/chains/storage/gcs/gcs_test.go index 1ff15f53af..4468a8d9ae 100644 --- a/pkg/chains/storage/gcs/gcs_test.go +++ b/pkg/chains/storage/gcs/gcs_test.go @@ -23,7 +23,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" rtesting "knative.dev/pkg/reconciler/testing" @@ -32,8 +32,8 @@ import ( //nolint:staticcheck func TestBackend_StorePayload(t *testing.T) { type args struct { - tr *v1beta1.TaskRun - pr *v1beta1.PipelineRun + tr *v1.TaskRun + pr *v1.PipelineRun signed []byte signature string opts config.StorageOpts @@ -46,14 +46,14 @@ func TestBackend_StorePayload(t *testing.T) { { name: "no error, intoto", args: args{ - tr: &v1beta1.TaskRun{ + tr: &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", UID: types.UID("uid"), }, }, - pr: 
&v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", @@ -68,14 +68,14 @@ func TestBackend_StorePayload(t *testing.T) { { name: "no error, tekton", args: args{ - tr: &v1beta1.TaskRun{ + tr: &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", UID: types.UID("uid"), }, }, - pr: &v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Namespace: "foo", Name: "bar", @@ -98,13 +98,13 @@ func TestBackend_StorePayload(t *testing.T) { reader: mockGcsRead, cfg: config.Config{Storage: config.StorageConfigs{GCS: config.GCSStorageConfig{Bucket: "foo"}}}, } - trObj := objects.NewTaskRunObject(tt.args.tr) + trObj := objects.NewTaskRunObjectV1(tt.args.tr) if err := b.StorePayload(ctx, trObj, tt.args.signed, tt.args.signature, tt.args.opts); (err != nil) != tt.wantErr { t.Errorf("Backend.StorePayload() error = %v, wantErr %v", err, tt.wantErr) } - objectSig := taskRunSigName(tt.args.tr, tt.args.opts) - objectPayload := taskRunPayloadName(tt.args.tr, tt.args.opts) + objectSig := taskRunSigNameV1(tt.args.tr, tt.args.opts) + objectPayload := taskRunPayloadNameV1(tt.args.tr, tt.args.opts) got, err := b.RetrieveSignatures(ctx, trObj, tt.args.opts) if err != nil { t.Fatal(err) @@ -121,13 +121,13 @@ func TestBackend_StorePayload(t *testing.T) { t.Errorf("wrong signature, expected %s, got %s", tt.args.signed, gotPayload[objectPayload]) } - prObj := objects.NewPipelineRunObject(tt.args.pr) + prObj := objects.NewPipelineRunObjectV1(tt.args.pr) if err := b.StorePayload(ctx, prObj, tt.args.signed, tt.args.signature, tt.args.opts); (err != nil) != tt.wantErr { t.Errorf("Backend.StorePayload() error = %v, wantErr %v", err, tt.wantErr) } - objectSig = pipelineRunSigname(tt.args.pr, tt.args.opts) - objectPayload = pipelineRunPayloadName(tt.args.pr, tt.args.opts) + objectSig = pipelineRunSignameV1(tt.args.pr, tt.args.opts) + objectPayload = pipelineRunPayloadNameV1(tt.args.pr, tt.args.opts) got, err = 
b.RetrieveSignatures(ctx, prObj, tt.args.opts) if err != nil { t.Fatal(err) diff --git a/pkg/chains/storage/grafeas/grafeas_test.go b/pkg/chains/storage/grafeas/grafeas_test.go index 295ceacc50..7f9c976daf 100644 --- a/pkg/chains/storage/grafeas/grafeas_test.go +++ b/pkg/chains/storage/grafeas/grafeas_test.go @@ -66,7 +66,7 @@ const ( var ( // clone taskrun // -------------- - cloneTaskRun = &v1beta1.TaskRun{ + cloneTaskRun = &v1beta1.TaskRun{ //nolint:staticcheck ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "git-clone", @@ -100,7 +100,7 @@ var ( artifactIdentifier2 = fmt.Sprintf("%s@sha256:%s", artifactURL2, artifactDigest2) // artifact build taskrun - buildTaskRun = &v1beta1.TaskRun{ + buildTaskRun = &v1beta1.TaskRun{ //nolint:staticcheck ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "artifact-build", @@ -139,7 +139,7 @@ var ( } // ci pipelinerun - ciPipeline = &v1beta1.PipelineRun{ + ciPipeline = &v1beta1.PipelineRun{ //nolint:staticcheck ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: "ci-pipeline", @@ -261,7 +261,7 @@ func TestGrafeasBackend_StoreAndRetrieve(t *testing.T) { { name: "intoto for clone taskrun, no error, no occurrences created because no artifacts were built.", args: args{ - runObject: &objects.TaskRunObject{ + runObject: &objects.TaskRunObjectV1Beta1{ TaskRun: cloneTaskRun, }, payload: getRawPayload(t, cloneTaskRunProvenance), @@ -274,7 +274,7 @@ func TestGrafeasBackend_StoreAndRetrieve(t *testing.T) { { name: "intoto for build taskrun, no error, 2 BUILD occurrences should be created for the 2 artifacts generated.", args: args{ - runObject: &objects.TaskRunObject{ + runObject: &objects.TaskRunObjectV1Beta1{ TaskRun: buildTaskRun, }, payload: getRawPayload(t, buildTaskRunProvenance), @@ -287,7 +287,7 @@ func TestGrafeasBackend_StoreAndRetrieve(t *testing.T) { { name: "simplesigning for the build taskrun, no error, 1 ATTESTATION occurrence should be created for the artifact specified in storageopts.key", args: 
args{ - runObject: &objects.TaskRunObject{ + runObject: &objects.TaskRunObjectV1Beta1{ TaskRun: buildTaskRun, }, payload: []byte("attestation payload"), @@ -300,7 +300,7 @@ func TestGrafeasBackend_StoreAndRetrieve(t *testing.T) { { name: "intoto for the ci pipeline, no error, 2 occurences should be created for the pipelinerun for the 2 artifact generated.", args: args{ - runObject: &objects.PipelineRunObject{ + runObject: &objects.PipelineRunObjectV1Beta1{ PipelineRun: ciPipeline, }, payload: getRawPayload(t, ciPipelineRunProvenance), @@ -313,7 +313,7 @@ func TestGrafeasBackend_StoreAndRetrieve(t *testing.T) { { name: "tekton format for a taskrun, error, only simplesigning and intoto are supported", args: args{ - runObject: &objects.TaskRunObject{ + runObject: &objects.TaskRunObjectV1Beta1{ TaskRun: buildTaskRun, }, payload: []byte("foo"), diff --git a/pkg/chains/storage/oci/oci_test.go b/pkg/chains/storage/oci/oci_test.go index 36d5a37a70..e7e9b01150 100644 --- a/pkg/chains/storage/oci/oci_test.go +++ b/pkg/chains/storage/oci/oci_test.go @@ -43,13 +43,13 @@ import ( const namespace = "oci-test" var ( - tr = &v1beta1.TaskRun{ + tr = &v1beta1.TaskRun{ //nolint:staticcheck ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: namespace, }, } - pr = &v1beta1.PipelineRun{ + pr = &v1beta1.PipelineRun{ //nolint:staticcheck ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: namespace, @@ -116,7 +116,7 @@ func TestBackend_StorePayload(t *testing.T) { }{{ name: "simplesigning payload", fields: fields{ - object: objects.NewTaskRunObject(tr), + object: objects.NewTaskRunObjectV1Beta1(tr), }, args: args{ payload: simple, @@ -129,7 +129,7 @@ func TestBackend_StorePayload(t *testing.T) { }, { name: "into-to payload", fields: fields{ - object: objects.NewTaskRunObject(tr), + object: objects.NewTaskRunObjectV1Beta1(tr), }, args: args{ payload: intotoStatement, @@ -142,7 +142,7 @@ func TestBackend_StorePayload(t *testing.T) { }, { name: "no subject", fields: fields{ - object: 
objects.NewTaskRunObject(tr), + object: objects.NewTaskRunObjectV1Beta1(tr), }, args: args{ payload: in_toto.Statement{}, @@ -155,7 +155,7 @@ func TestBackend_StorePayload(t *testing.T) { }, { name: "simplesigning payload", fields: fields{ - object: objects.NewPipelineRunObject(pr), + object: objects.NewPipelineRunObjectV1Beta1(pr), }, args: args{ payload: simple, @@ -168,7 +168,7 @@ func TestBackend_StorePayload(t *testing.T) { }, { name: "into-to payload", fields: fields{ - object: objects.NewPipelineRunObject(pr), + object: objects.NewPipelineRunObjectV1Beta1(pr), }, args: args{ payload: intotoStatement, @@ -181,7 +181,7 @@ func TestBackend_StorePayload(t *testing.T) { }, { name: "in-toto-and-simple-payload", fields: fields{ - object: objects.NewTaskRunObject(tr), + object: objects.NewTaskRunObjectV1Beta1(tr), }, args: args{ payload: simple, @@ -194,7 +194,7 @@ func TestBackend_StorePayload(t *testing.T) { }, { name: "tekton-and-simple-payload", fields: fields{ - object: objects.NewTaskRunObject(tr), + object: objects.NewTaskRunObjectV1Beta1(tr), }, args: args{ payload: simple, @@ -207,7 +207,7 @@ func TestBackend_StorePayload(t *testing.T) { }, { name: "no subject", fields: fields{ - object: objects.NewPipelineRunObject(pr), + object: objects.NewPipelineRunObjectV1Beta1(pr), }, args: args{ payload: in_toto.Statement{}, diff --git a/pkg/chains/storage/pubsub/pubsub_test.go b/pkg/chains/storage/pubsub/pubsub_test.go index 7e76202c3b..a7505674ba 100644 --- a/pkg/chains/storage/pubsub/pubsub_test.go +++ b/pkg/chains/storage/pubsub/pubsub_test.go @@ -36,7 +36,7 @@ func TestBackend_StorePayload(t *testing.T) { logger := logtesting.TestLogger(t) type fields struct { - tr *v1beta1.TaskRun + tr *v1beta1.TaskRun //nolint:staticcheck cfg config.Config } type args struct { @@ -53,7 +53,7 @@ func TestBackend_StorePayload(t *testing.T) { { name: "no subject", fields: fields{ - tr: &v1beta1.TaskRun{ + tr: &v1beta1.TaskRun{ //nolint:staticcheck ObjectMeta: v1.ObjectMeta{ Name: 
"foo", Namespace: "bar", @@ -109,7 +109,7 @@ func TestBackend_StorePayload(t *testing.T) { } }() - trObj := objects.NewTaskRunObject(tt.fields.tr) + trObj := objects.NewTaskRunObjectV1Beta1(tt.fields.tr) // Store the payload. if err := b.StorePayload(ctx, trObj, tt.args.rawPayload, tt.args.signature, tt.args.storageOpts); (err != nil) != tt.wantErr { t.Errorf("Backend.StorePayload() error = %v, wantErr %v", err, tt.wantErr) diff --git a/pkg/chains/storage/tekton/tekton_test.go b/pkg/chains/storage/tekton/tekton_test.go index 679d55439b..0dfb5b1b5c 100644 --- a/pkg/chains/storage/tekton/tekton_test.go +++ b/pkg/chains/storage/tekton/tekton_test.go @@ -42,7 +42,7 @@ func TestBackend_StorePayload(t *testing.T) { A: "foo", B: 3, }, - object: objects.NewTaskRunObject(&v1beta1.TaskRun{ + object: objects.NewTaskRunObjectV1Beta1(&v1beta1.TaskRun{ //nolint:staticcheck ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "bar", @@ -62,7 +62,7 @@ func TestBackend_StorePayload(t *testing.T) { A: "foo", B: 3, }, - object: objects.NewPipelineRunObject(&v1beta1.PipelineRun{ + object: objects.NewPipelineRunObjectV1Beta1(&v1beta1.PipelineRun{ //nolint:staticcheck ObjectMeta: metav1.ObjectMeta{ Name: "foo", Namespace: "bar", diff --git a/pkg/chains/verifier.go b/pkg/chains/verifier.go index bd0964567a..13b2e3ada3 100644 --- a/pkg/chains/verifier.go +++ b/pkg/chains/verifier.go @@ -21,6 +21,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/chains/storage" "github.com/tektoncd/chains/pkg/config" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" "k8s.io/apimachinery/pkg/util/sets" @@ -29,7 +30,7 @@ import ( ) type Verifier interface { - VerifyTaskRun(ctx context.Context, tr *v1beta1.TaskRun) error + VerifyTaskRun(ctx context.Context, tr *v1.TaskRun) error } type TaskRunVerifier struct { @@ -38,7 +39,7 @@ 
type TaskRunVerifier struct { SecretPath string } -func (tv *TaskRunVerifier) VerifyTaskRun(ctx context.Context, tr *v1beta1.TaskRun) error { +func (tv *TaskRunVerifier) VerifyTaskRun(ctx context.Context, tr *v1.TaskRun) error { // Get all the things we might need (storage backends, signers and formatters) cfg := *config.FromContext(ctx) logger := logging.FromContext(ctx) @@ -50,7 +51,13 @@ func (tv *TaskRunVerifier) VerifyTaskRun(ctx context.Context, tr *v1beta1.TaskRu &artifacts.OCIArtifact{}, } - trObj := objects.NewTaskRunObject(tr) + // TODO(https://github.com/tektoncd/chains/issues/1026) add support for passing v1 object (vs converted v1beta1) for v2alpha3+ + trV1Beta1 := &v1beta1.TaskRun{} //nolint:staticcheck + if err := trV1Beta1.ConvertFrom(ctx, tr); err != nil { + return err + } + + trObj := objects.NewTaskRunObjectV1Beta1(trV1Beta1) // Storage allBackends, err := storage.InitializeBackends(ctx, tv.Pipelineclientset, tv.KubeClient, cfg) diff --git a/pkg/config/config.go b/pkg/config/config.go index 15c49e878e..333582adfa 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -272,7 +272,7 @@ func NewConfigFromMap(data map[string]string) (*Config, error) { // PipelineRuns asString(pipelinerunFormatKey, &cfg.Artifacts.PipelineRuns.Format, "in-toto", "slsa/v1", "slsa/v2alpha2"), - asStringSet(pipelinerunStorageKey, &cfg.Artifacts.PipelineRuns.StorageBackend, sets.New[string]("tekton", "oci", "docdb", "grafeas")), + asStringSet(pipelinerunStorageKey, &cfg.Artifacts.PipelineRuns.StorageBackend, sets.New[string]("tekton", "oci", "gcs", "docdb", "grafeas")), asString(pipelinerunSignerKey, &cfg.Artifacts.PipelineRuns.Signer, "x509", "kms"), asBool(pipelinerunEnableDeepInspectionKey, &cfg.Artifacts.PipelineRuns.DeepInspectionEnabled), diff --git a/pkg/internal/objectloader/objectloader.go b/pkg/internal/objectloader/objectloader.go index b2cb90f9a7..cc0c6e4847 100644 --- a/pkg/internal/objectloader/objectloader.go +++ 
b/pkg/internal/objectloader/objectloader.go @@ -20,27 +20,52 @@ import ( "encoding/json" "os" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" ) -func TaskRunFromFile(f string) (*v1beta1.TaskRun, error) { +func TaskRunFromFile(f string) (*v1.TaskRun, error) { contents, err := os.ReadFile(f) if err != nil { return nil, err } - var tr v1beta1.TaskRun + var tr v1.TaskRun if err := json.Unmarshal(contents, &tr); err != nil { return nil, err } return &tr, nil } -func PipelineRunFromFile(f string) (*v1beta1.PipelineRun, error) { +func PipelineRunFromFile(f string) (*v1.PipelineRun, error) { contents, err := os.ReadFile(f) if err != nil { return nil, err } - var pr v1beta1.PipelineRun + var pr v1.PipelineRun + if err := json.Unmarshal(contents, &pr); err != nil { + return nil, err + } + return &pr, nil +} + +func TaskRunV1Beta1FromFile(f string) (*v1beta1.TaskRun, error) { //nolint:staticcheck + contents, err := os.ReadFile(f) + if err != nil { + return nil, err + } + var tr v1beta1.TaskRun //nolint:staticcheck + if err := json.Unmarshal(contents, &tr); err != nil { + return nil, err + } + return &tr, nil +} + +func PipelineRunV1Beta1FromFile(f string) (*v1beta1.PipelineRun, error) { //nolint:staticcheck + contents, err := os.ReadFile(f) + if err != nil { + return nil, err + } + var pr v1beta1.PipelineRun //nolint:staticcheck if err := json.Unmarshal(contents, &pr); err != nil { return nil, err } diff --git a/pkg/reconciler/pipelinerun/controller.go b/pkg/reconciler/pipelinerun/controller.go index bc3d7ad47e..8f5ff637ba 100644 --- a/pkg/reconciler/pipelinerun/controller.go +++ b/pkg/reconciler/pipelinerun/controller.go @@ -19,11 +19,11 @@ import ( "github.com/tektoncd/chains/pkg/chains" "github.com/tektoncd/chains/pkg/chains/storage" "github.com/tektoncd/chains/pkg/config" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" pipelineclient 
"github.com/tektoncd/pipeline/pkg/client/injection/client" - pipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun" - taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" - pipelinerunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun" + pipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun" + taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun" + pipelinerunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun" "k8s.io/client-go/tools/cache" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/configmap" @@ -77,12 +77,16 @@ func NewController(ctx context.Context, cmw configmap.Watcher) *controller.Impl c.Tracker = impl.Tracker - pipelineRunInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + if _, err := pipelineRunInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)); err != nil { + logger.Errorf("adding event handler for pipelinerun controller's pipelinerun informer encountered error: %w", err) + } - taskRunInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ - FilterFunc: controller.FilterController(&v1beta1.PipelineRun{}), + if _, err := taskRunInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{ + FilterFunc: controller.FilterController(&v1.PipelineRun{}), Handler: controller.HandleAll(impl.EnqueueControllerOf), - }) + }); err != nil { + logger.Errorf("adding event handler for pipelinerun controller's taskrun informer encountered error: %w", err) + } return impl } diff --git a/pkg/reconciler/pipelinerun/pipelinerun.go b/pkg/reconciler/pipelinerun/pipelinerun.go index 069aa80d36..11ea60509b 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun.go +++ 
b/pkg/reconciler/pipelinerun/pipelinerun.go @@ -19,10 +19,10 @@ import ( signing "github.com/tektoncd/chains/pkg/chains" "github.com/tektoncd/chains/pkg/chains/objects" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - pipelinerunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun" - listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1" + pipelinerunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun" + listers "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1" "k8s.io/apimachinery/pkg/api/errors" "knative.dev/pkg/logging" pkgreconciler "knative.dev/pkg/reconciler" @@ -47,7 +47,7 @@ var _ pipelinerunreconciler.Finalizer = (*Reconciler)(nil) // ReconcileKind handles a changed or created PipelineRun. // This is the main entrypoint for chains business logic. -func (r *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) pkgreconciler.Event { +func (r *Reconciler) ReconcileKind(ctx context.Context, pr *v1.PipelineRun) pkgreconciler.Event { log := logging.FromContext(ctx).With("pipelinerun", fmt.Sprintf("%s/%s", pr.Namespace, pr.Name)) return r.FinalizeKind(logging.WithLogger(ctx, log), pr) } @@ -56,13 +56,13 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, pr *v1beta1.PipelineRun) // We utilize finalizers to ensure that we get a crack at signing every pipelinerun // that we see flowing through the system. If we don't add a finalizer, it could // get cleaned up before we see the final state and sign it. -func (r *Reconciler) FinalizeKind(ctx context.Context, pr *v1beta1.PipelineRun) pkgreconciler.Event { +func (r *Reconciler) FinalizeKind(ctx context.Context, pr *v1.PipelineRun) pkgreconciler.Event { // Check to make sure the PipelineRun is finished. 
if !pr.IsDone() { logging.FromContext(ctx).Infof("pipelinerun is still running") return nil } - pro := objects.NewPipelineRunObject(pr) + pro := objects.NewPipelineRunObjectV1(pr) // Check to see if it has already been signed. if signing.Reconciled(ctx, r.Pipelineclientset, pro) { @@ -72,21 +72,8 @@ func (r *Reconciler) FinalizeKind(ctx context.Context, pr *v1beta1.PipelineRun) // Get TaskRun names depending on whether embeddedstatus feature is set or not var trs []string - if len(pr.Status.ChildReferences) == 0 || len(pr.Status.TaskRuns) > 0 || len(pr.Status.Runs) > 0 { //nolint:all //incompatible with pipelines v0.45 - for trName, ptrs := range pr.Status.TaskRuns { //nolint:all //incompatible with pipelines v0.45 - // TaskRuns within a PipelineRun may not have been finalized yet if the PipelineRun timeout - // has exceeded. Wait to process the PipelineRun on the next update, see - // https://github.com/tektoncd/pipeline/issues/4916 - if ptrs.Status == nil || ptrs.Status.CompletionTime == nil { - logging.FromContext(ctx).Infof("taskrun %s within pipelinerun is not yet finalized: embedded status is not complete", trName) - return nil - } - trs = append(trs, trName) - } - } else { - for _, cr := range pr.Status.ChildReferences { - trs = append(trs, cr.Name) - } + for _, cr := range pr.Status.ChildReferences { + trs = append(trs, cr.Name) } // Signing both taskruns and pipelineruns causes a race condition when using oci storage @@ -111,7 +98,7 @@ func (r *Reconciler) FinalizeKind(ctx context.Context, pr *v1beta1.PipelineRun) logging.FromContext(ctx).Infof("taskrun %s within pipelinerun is not yet finalized: status is not complete", name) return r.trackTaskRun(tr, pr) } - reconciled := signing.Reconciled(ctx, r.Pipelineclientset, objects.NewTaskRunObject(tr)) + reconciled := signing.Reconciled(ctx, r.Pipelineclientset, objects.NewTaskRunObjectV1(tr)) if !reconciled { logging.FromContext(ctx).Infof("taskrun %s within pipelinerun is not yet reconciled", name) return 
r.trackTaskRun(tr, pr) @@ -125,9 +112,9 @@ func (r *Reconciler) FinalizeKind(ctx context.Context, pr *v1beta1.PipelineRun) return nil } -func (r *Reconciler) trackTaskRun(tr *v1beta1.TaskRun, pr *v1beta1.PipelineRun) error { +func (r *Reconciler) trackTaskRun(tr *v1.TaskRun, pr *v1.PipelineRun) error { ref := tracker.Reference{ - APIVersion: "tekton.dev/v1beta1", + APIVersion: "tekton.dev/v1", Kind: "TaskRun", Namespace: tr.Namespace, Name: tr.Name, diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index be6bb0aa7d..3d7235172c 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -23,14 +23,13 @@ import ( "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/chains/pkg/internal/mocksigner" "github.com/tektoncd/chains/pkg/test/tekton" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - informers "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + informers "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1" fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" - fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/fake" - faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake" + fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun/fake" + faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/fake" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "knative.dev/pkg/apis" duckv1 "knative.dev/pkg/apis/duck/v1" @@ -46,17 +45,17 @@ func TestReconciler_Reconcile(t *testing.T) 
{ tests := []struct { name string key string - pipelineRuns []*v1beta1.PipelineRun + pipelineRuns []*v1.PipelineRun }{ { name: "no pipelineRuns", key: "foo/bar", - pipelineRuns: []*v1beta1.PipelineRun{}, + pipelineRuns: []*v1.PipelineRun{}, }, { name: "found PipelineRun", key: "foo/bar", - pipelineRuns: []*v1beta1.PipelineRun{ + pipelineRuns: []*v1.PipelineRun{ { ObjectMeta: metav1.ObjectMeta{ Name: "bar", @@ -93,13 +92,13 @@ func TestReconciler_Reconcile(t *testing.T) { } } -func setupData(ctx context.Context, t *testing.T, prs []*v1beta1.PipelineRun) informers.PipelineRunInformer { +func setupData(ctx context.Context, t *testing.T, prs []*v1.PipelineRun) informers.PipelineRunInformer { pri := fakepipelineruninformer.Get(ctx) c := fakepipelineclient.Get(ctx) for _, pa := range prs { pa := pa.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.TektonV1beta1().PipelineRuns(pa.Namespace).Create(ctx, pa, metav1.CreateOptions{}); err != nil { + if _, err := c.TektonV1().PipelineRuns(pa.Namespace).Create(ctx, pa, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -111,20 +110,20 @@ func TestReconciler_handlePipelineRun(t *testing.T) { tests := []struct { name string - pr *v1beta1.PipelineRun - taskruns []*v1beta1.TaskRun + pr *v1.PipelineRun + taskruns []*v1.TaskRun shouldSign bool wantErr bool }{ { name: "complete, already signed", - pr: &v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun", Namespace: "default", Annotations: map[string]string{signing.ChainsAnnotation: "true"}, }, - Status: v1beta1.PipelineRunStatus{ + Status: v1.PipelineRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, }}, @@ -133,13 +132,13 @@ func TestReconciler_handlePipelineRun(t *testing.T) { }, { name: "complete, not already signed", - pr: &v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun", Namespace: "default", 
Annotations: map[string]string{}, }, - Status: v1beta1.PipelineRunStatus{ + Status: v1.PipelineRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, }}, @@ -148,13 +147,13 @@ func TestReconciler_handlePipelineRun(t *testing.T) { }, { name: "not complete, not already signed", - pr: &v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun", Namespace: "default", Annotations: map[string]string{}, }, - Status: v1beta1.PipelineRunStatus{ + Status: v1.PipelineRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{}, }}, @@ -163,31 +162,19 @@ func TestReconciler_handlePipelineRun(t *testing.T) { }, { name: "taskruns completed with full taskrun status", - pr: &v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun", Namespace: "default", Annotations: map[string]string{}, }, - Status: v1beta1.PipelineRunStatus{ + Status: v1.PipelineRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, }, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ - "taskrun1": { - PipelineTaskName: "task1", - Status: &v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - CompletionTime: &metav1.Time{}, - }, - }, - }, - }, - }, }, }, - taskruns: []*v1beta1.TaskRun{ + taskruns: []*v1.TaskRun{ { ObjectMeta: metav1.ObjectMeta{ Name: "taskrun1", @@ -196,9 +183,9 @@ func TestReconciler_handlePipelineRun(t *testing.T) { "chains.tekton.dev/signed": "true", }, }, - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - CompletionTime: &v1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + CompletionTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, }, }, }, @@ -208,19 +195,19 @@ func 
TestReconciler_handlePipelineRun(t *testing.T) { }, { name: "taskruns completed with child references", - pr: &v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun", Namespace: "default", Annotations: map[string]string{}, }, - Status: v1beta1.PipelineRunStatus{ + Status: v1.PipelineRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, }, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - ChildReferences: []v1beta1.ChildStatusReference{ - v1beta1.ChildStatusReference{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + ChildReferences: []v1.ChildStatusReference{ + { Name: "taskrun1", PipelineTaskName: "task1", }, @@ -228,7 +215,7 @@ func TestReconciler_handlePipelineRun(t *testing.T) { }, }, }, - taskruns: []*v1beta1.TaskRun{ + taskruns: []*v1.TaskRun{ { ObjectMeta: metav1.ObjectMeta{ Name: "taskrun1", @@ -237,9 +224,9 @@ func TestReconciler_handlePipelineRun(t *testing.T) { "chains.tekton.dev/signed": "true", }, }, - Status: v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - CompletionTime: &v1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, + Status: v1.TaskRunStatus{ + TaskRunStatusFields: v1.TaskRunStatusFields{ + CompletionTime: &metav1.Time{Time: time.Date(1995, time.December, 24, 6, 12, 12, 24, time.UTC)}, }, }, }, @@ -247,58 +234,21 @@ func TestReconciler_handlePipelineRun(t *testing.T) { shouldSign: true, wantErr: false, }, - { - name: "taskruns not yet completed", - pr: &v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pipelinerun", - Namespace: "default", - Annotations: map[string]string{}, - }, - Status: v1beta1.PipelineRunStatus{ - Status: duckv1.Status{ - Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, - }, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ - "taskrun1": { - PipelineTaskName: "task1", - 
Status: &v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - CompletionTime: &metav1.Time{}, - }, - }, - }, - }, - }, - }, - }, - taskruns: []*v1beta1.TaskRun{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "taskrun1", - Namespace: "default", - }, - }, - }, - shouldSign: false, - wantErr: true, - }, { name: "taskruns not yet completed with child references", - pr: &v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun", Namespace: "default", Annotations: map[string]string{}, }, - Status: v1beta1.PipelineRunStatus{ + Status: v1.PipelineRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, }, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - ChildReferences: []v1beta1.ChildStatusReference{ - v1beta1.ChildStatusReference{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + ChildReferences: []v1.ChildStatusReference{ + { Name: "taskrun1", PipelineTaskName: "task1", }, @@ -306,7 +256,7 @@ func TestReconciler_handlePipelineRun(t *testing.T) { }, }, }, - taskruns: []*v1beta1.TaskRun{ + taskruns: []*v1.TaskRun{ { ObjectMeta: metav1.ObjectMeta{ Name: "taskrun1", @@ -317,50 +267,21 @@ func TestReconciler_handlePipelineRun(t *testing.T) { shouldSign: false, wantErr: true, }, - { - name: "missing taskrun", - pr: &v1beta1.PipelineRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: "pipelinerun", - Namespace: "default", - Annotations: map[string]string{}, - }, - Status: v1beta1.PipelineRunStatus{ - Status: duckv1.Status{ - Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, - }, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - TaskRuns: map[string]*v1beta1.PipelineRunTaskRunStatus{ - "taskrun1": { - PipelineTaskName: "task1", - Status: &v1beta1.TaskRunStatus{ - TaskRunStatusFields: v1beta1.TaskRunStatusFields{ - CompletionTime: &metav1.Time{}, - }, - }, - }, - }, - }, - }, - }, - shouldSign: false, - wantErr: false, - }, { name: "missing 
taskrun with child references", - pr: &v1beta1.PipelineRun{ + pr: &v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ Name: "pipelinerun", Namespace: "default", Annotations: map[string]string{}, }, - Status: v1beta1.PipelineRunStatus{ + Status: v1.PipelineRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, }, - PipelineRunStatusFields: v1beta1.PipelineRunStatusFields{ - ChildReferences: []v1beta1.ChildStatusReference{ - v1beta1.ChildStatusReference{ + PipelineRunStatusFields: v1.PipelineRunStatusFields{ + ChildReferences: []v1.ChildStatusReference{ + { Name: "taskrun1", PipelineTaskName: "task1", }, @@ -377,7 +298,7 @@ func TestReconciler_handlePipelineRun(t *testing.T) { signer := &mocksigner.Signer{} ctx, _ := rtesting.SetupFakeContext(t) c := fakepipelineclient.Get(ctx) - tekton.CreateObject(t, ctx, c, objects.NewPipelineRunObject(tt.pr)) + tekton.CreateObject(t, ctx, c, objects.NewPipelineRunObjectV1(tt.pr)) tri := faketaskruninformer.Get(ctx) r := &Reconciler{ diff --git a/pkg/reconciler/taskrun/controller.go b/pkg/reconciler/taskrun/controller.go index dbbb1cdab9..6d2eea061e 100644 --- a/pkg/reconciler/taskrun/controller.go +++ b/pkg/reconciler/taskrun/controller.go @@ -20,8 +20,8 @@ import ( "github.com/tektoncd/chains/pkg/chains/storage" "github.com/tektoncd/chains/pkg/config" pipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client" - taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" - taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun" + taskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun" + taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/configmap" "knative.dev/pkg/controller" @@ -70,7 +70,9 @@ func NewController(ctx 
context.Context, cmw configmap.Watcher) *controller.Impl } }) - taskRunInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)) + if _, err := taskRunInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue)); err != nil { + logger.Errorf("adding event handler for taskrun controller's taskrun informer encountered error: %w", err) + } return impl } diff --git a/pkg/reconciler/taskrun/taskrun.go b/pkg/reconciler/taskrun/taskrun.go index 18fa87d15b..2cc7da1077 100644 --- a/pkg/reconciler/taskrun/taskrun.go +++ b/pkg/reconciler/taskrun/taskrun.go @@ -18,9 +18,9 @@ import ( signing "github.com/tektoncd/chains/pkg/chains" "github.com/tektoncd/chains/pkg/chains/objects" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun" + taskrunreconciler "github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun" "knative.dev/pkg/logging" pkgreconciler "knative.dev/pkg/reconciler" ) @@ -41,7 +41,7 @@ var _ taskrunreconciler.Finalizer = (*Reconciler)(nil) // ReconcileKind handles a changed or created TaskRun. // This is the main entrypoint for chains business logic. -func (r *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkgreconciler.Event { +func (r *Reconciler) ReconcileKind(ctx context.Context, tr *v1.TaskRun) pkgreconciler.Event { return r.FinalizeKind(ctx, tr) } @@ -49,14 +49,14 @@ func (r *Reconciler) ReconcileKind(ctx context.Context, tr *v1beta1.TaskRun) pkg // We utilize finalizers to ensure that we get a crack at signing every taskrun // that we see flowing through the system. If we don't add a finalizer, it could // get cleaned up before we see the final state and sign it. 
-func (r *Reconciler) FinalizeKind(ctx context.Context, tr *v1beta1.TaskRun) pkgreconciler.Event { +func (r *Reconciler) FinalizeKind(ctx context.Context, tr *v1.TaskRun) pkgreconciler.Event { // Check to make sure the TaskRun is finished. if !tr.IsDone() { logging.FromContext(ctx).Infof("taskrun %s/%s is still running", tr.Namespace, tr.Name) return nil } - obj := objects.NewTaskRunObject(tr) + obj := objects.NewTaskRunObjectV1(tr) // Check to see if it has already been signed. if signing.Reconciled(ctx, r.Pipelineclientset, obj) { diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index d0e8a2b36c..5335b44f85 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -22,10 +22,10 @@ import ( "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/chains/pkg/internal/mocksigner" "github.com/tektoncd/chains/pkg/test/tekton" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - informers "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + informers "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1" fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" - faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake" + faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun/fake" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -42,17 +42,17 @@ func TestReconciler_Reconcile(t *testing.T) { tests := []struct { name string key string - taskRuns []*v1beta1.TaskRun + taskRuns []*v1.TaskRun }{ { name: "no taskruns", key: "foo/bar", - taskRuns: []*v1beta1.TaskRun{}, + taskRuns: []*v1.TaskRun{}, }, { name: "found taskrun", key: "foo/bar", - taskRuns: []*v1beta1.TaskRun{ + 
taskRuns: []*v1.TaskRun{ { ObjectMeta: metav1.ObjectMeta{ Name: "bar", @@ -89,13 +89,13 @@ func TestReconciler_Reconcile(t *testing.T) { } } -func setupData(ctx context.Context, t *testing.T, trs []*v1beta1.TaskRun) informers.TaskRunInformer { +func setupData(ctx context.Context, t *testing.T, trs []*v1.TaskRun) informers.TaskRunInformer { tri := faketaskruninformer.Get(ctx) c := fakepipelineclient.Get(ctx) for _, ta := range trs { ta := ta.DeepCopy() // Avoid assumptions that the informer's copy is modified. - if _, err := c.TektonV1beta1().TaskRuns(ta.Namespace).Create(ctx, ta, metav1.CreateOptions{}); err != nil { + if _, err := c.TektonV1().TaskRuns(ta.Namespace).Create(ctx, ta, metav1.CreateOptions{}); err != nil { t.Fatal(err) } } @@ -107,16 +107,16 @@ func TestReconciler_handleTaskRun(t *testing.T) { tests := []struct { name string - tr *v1beta1.TaskRun + tr *v1.TaskRun shouldSign bool }{ { name: "complete, already signed", - tr: &v1beta1.TaskRun{ + tr: &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{signing.ChainsAnnotation: "true"}, }, - Status: v1beta1.TaskRunStatus{ + Status: v1.TaskRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, }}, @@ -125,11 +125,11 @@ func TestReconciler_handleTaskRun(t *testing.T) { }, { name: "complete, not already signed", - tr: &v1beta1.TaskRun{ + tr: &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, - Status: v1beta1.TaskRunStatus{ + Status: v1.TaskRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{{Type: apis.ConditionSucceeded}}, }}, @@ -138,11 +138,11 @@ func TestReconciler_handleTaskRun(t *testing.T) { }, { name: "not complete, not already signed", - tr: &v1beta1.TaskRun{ + tr: &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{}, }, - Status: v1beta1.TaskRunStatus{ + Status: v1.TaskRunStatus{ Status: duckv1.Status{ Conditions: []apis.Condition{}, }}, @@ -155,12 +155,13 @@ func 
TestReconciler_handleTaskRun(t *testing.T) { signer := &mocksigner.Signer{} ctx, _ := rtesting.SetupFakeContext(t) c := fakepipelineclient.Get(ctx) - tekton.CreateObject(t, ctx, c, objects.NewTaskRunObject(tt.tr)) + tekton.CreateObject(t, ctx, c, objects.NewTaskRunObjectV1(tt.tr)) r := &Reconciler{ TaskRunSigner: signer, Pipelineclientset: c, } + ctx = config.ToContext(ctx, &config.Config{}) if err := r.ReconcileKind(ctx, tt.tr); err != nil { t.Errorf("Reconciler.handleTaskRun() error = %v", err) } diff --git a/pkg/test/tekton/tekton.go b/pkg/test/tekton/tekton.go index d74bf1461c..dd2014fe31 100644 --- a/pkg/test/tekton/tekton.go +++ b/pkg/test/tekton/tekton.go @@ -19,6 +19,7 @@ import ( "testing" "github.com/tektoncd/chains/pkg/chains/objects" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" pipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -27,28 +28,47 @@ import ( func CreateObject(t *testing.T, ctx context.Context, ps pipelineclientset.Interface, obj objects.TektonObject) objects.TektonObject { switch o := obj.GetObject().(type) { - case *v1beta1.PipelineRun: + case *v1.PipelineRun: + pr, err := ps.TektonV1().PipelineRuns(obj.GetNamespace()).Create(ctx, o, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("error creating pipelinerun: %v", err) + } + return objects.NewPipelineRunObjectV1(pr) + case *v1.TaskRun: + tr, err := ps.TektonV1().TaskRuns(obj.GetNamespace()).Create(ctx, o, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("error creating taskrun: %v", err) + } + return objects.NewTaskRunObjectV1(tr) + case *v1beta1.PipelineRun: //nolint:staticcheck pr, err := ps.TektonV1beta1().PipelineRuns(obj.GetNamespace()).Create(ctx, o, metav1.CreateOptions{}) if err != nil { t.Fatalf("error creating pipelinerun: %v", err) } - return objects.NewPipelineRunObject(pr) - case *v1beta1.TaskRun: + return 
objects.NewPipelineRunObjectV1Beta1(pr) + case *v1beta1.TaskRun: //nolint:staticcheck tr, err := ps.TektonV1beta1().TaskRuns(obj.GetNamespace()).Create(ctx, o, metav1.CreateOptions{}) if err != nil { t.Fatalf("error creating taskrun: %v", err) } - return objects.NewTaskRunObject(tr) + return objects.NewTaskRunObjectV1Beta1(tr) } return nil } // Passing in TektonObject since it encapsulates namespace, name, and type. func GetObject(t *testing.T, ctx context.Context, ps pipelineclientset.Interface, obj objects.TektonObject) (objects.TektonObject, error) { + if obj == nil { + t.Fatalf("nil object received %T", obj.GetObject()) + } switch obj.GetObject().(type) { - case *v1beta1.PipelineRun: + case *v1.PipelineRun: return GetPipelineRun(t, ctx, ps, obj.GetNamespace(), obj.GetName()) - case *v1beta1.TaskRun: + case *v1.TaskRun: + return GetTaskRun(t, ctx, ps, obj.GetNamespace(), obj.GetName()) + case *v1beta1.PipelineRun: //nolint:staticcheck + return GetPipelineRun(t, ctx, ps, obj.GetNamespace(), obj.GetName()) + case *v1beta1.TaskRun: //nolint:staticcheck return GetTaskRun(t, ctx, ps, obj.GetNamespace(), obj.GetName()) } t.Fatalf("unknown object type %T", obj.GetObject()) @@ -56,29 +76,39 @@ func GetObject(t *testing.T, ctx context.Context, ps pipelineclientset.Interface } func GetPipelineRun(t *testing.T, ctx context.Context, ps pipelineclientset.Interface, namespace, name string) (objects.TektonObject, error) { - pr, err := ps.TektonV1beta1().PipelineRuns(namespace).Get(ctx, name, metav1.GetOptions{}) + pr, err := ps.TektonV1().PipelineRuns(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { t.Fatalf("error getting pipelinerun: %v", err) } - return objects.NewPipelineRunObject(pr), nil + return objects.NewPipelineRunObjectV1(pr), nil } func GetTaskRun(t *testing.T, ctx context.Context, ps pipelineclientset.Interface, namespace, name string) (objects.TektonObject, error) { - tr, err := ps.TektonV1beta1().TaskRuns(namespace).Get(ctx, name, 
metav1.GetOptions{}) + tr, err := ps.TektonV1().TaskRuns(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { t.Fatalf("error getting taskrun: %v", err) } - return objects.NewTaskRunObject(tr), nil + return objects.NewTaskRunObjectV1(tr), nil } func WatchObject(t *testing.T, ctx context.Context, ps pipelineclientset.Interface, obj objects.TektonObject) (watch.Interface, error) { switch o := obj.GetObject().(type) { - case *v1beta1.PipelineRun: + case *v1.PipelineRun: + return ps.TektonV1().PipelineRuns(obj.GetNamespace()).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{ + Name: o.GetName(), + Namespace: o.GetNamespace(), + })) + case *v1.TaskRun: + return ps.TektonV1().TaskRuns(obj.GetNamespace()).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{ + Name: o.GetName(), + Namespace: o.GetNamespace(), + })) + case *v1beta1.PipelineRun: //nolint:staticcheck return ps.TektonV1beta1().PipelineRuns(obj.GetNamespace()).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{ Name: o.GetName(), Namespace: o.GetNamespace(), })) - case *v1beta1.TaskRun: + case *v1beta1.TaskRun: //nolint:staticcheck return ps.TektonV1beta1().TaskRuns(obj.GetNamespace()).Watch(ctx, metav1.SingleObject(metav1.ObjectMeta{ Name: o.GetName(), Namespace: o.GetNamespace(), diff --git a/test/clients.go b/test/clients.go index d6138af32f..da51ef6c1b 100644 --- a/test/clients.go +++ b/test/clients.go @@ -110,7 +110,7 @@ func setup(ctx context.Context, t *testing.T, opts setupOpts) (*clients, string, imageDest := fmt.Sprintf("%s/%s", c.internalRegistry, opts.kanikoTaskImage) t.Logf("Creating Kaniko task referencing image %s", imageDest) task := kanikoTask(t, namespace, imageDest) - if _, err := c.PipelineClient.TektonV1beta1().Tasks(namespace).Create(ctx, task, metav1.CreateOptions{}); err != nil { + if _, err := c.PipelineClient.TektonV1().Tasks(namespace).Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("error creating task: %s", err) } } diff --git a/test/e2e_test.go 
b/test/e2e_test.go index 1f8c135ed4..96e08e1dc8 100644 --- a/test/e2e_test.go +++ b/test/e2e_test.go @@ -21,6 +21,7 @@ package test import ( "bytes" + "context" "crypto" "crypto/ecdsa" "encoding/base64" @@ -45,6 +46,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/chains/provenance" "github.com/tektoncd/chains/pkg/test/tekton" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -214,10 +216,13 @@ func TestOCISigning(t *testing.T) { t.Cleanup(cleanup) // Setup the right config. - resetConfig := setConfigMap(ctx, t, c, map[string]string{"artifacts.oci.storage": "tekton", "artifacts.taskrun.format": "in-toto"}) + resetConfig := setConfigMap(ctx, t, c, map[string]string{ + "artifacts.oci.storage": "tekton", + "artifacts.taskrun.format": "in-toto", + }) t.Cleanup(resetConfig) - tro := getTaskRunObject(ns) + tro := getTaskRunObjectV1(ns) createdTro := tekton.CreateObject(t, ctx, c.PipelineClient, tro) @@ -234,8 +239,15 @@ func TestOCISigning(t *testing.T) { // Let's fetch the signature and body: t.Log(obj.GetAnnotations()) + if _, ok := obj.GetAnnotations()["chains.tekton.dev/signature-586789aa031f"]; !ok { + t.Fatal("TaskRun missing expected signature annotation: chains.tekton.dev/signature-586789aa031f") + } + if _, ok := obj.GetAnnotations()["chains.tekton.dev/payload-586789aa031f"]; !ok { + t.Fatal("TaskRun missing expected payload annotation: chains.tekton.dev/signature-586789aa031f") + } - sig, body := obj.GetAnnotations()["chains.tekton.dev/signature-05f95b26ed10"], obj.GetAnnotations()["chains.tekton.dev/payload-05f95b26ed10"] + sig, body := obj.GetAnnotations()["chains.tekton.dev/signature-586789aa031f"], + obj.GetAnnotations()["chains.tekton.dev/payload-586789aa031f"] // base64 decode them sigBytes, err := base64.StdEncoding.DecodeString(sig) if err 
!= nil { @@ -417,7 +429,7 @@ func TestOCIStorage(t *testing.T) { imageName := "chains-test-oci-storage" image := fmt.Sprintf("%s/%s", c.internalRegistry, imageName) task := kanikoTask(t, ns, image) - if _, err := c.PipelineClient.TektonV1beta1().Tasks(ns).Create(ctx, task, metav1.CreateOptions{}); err != nil { + if _, err := c.PipelineClient.TektonV1().Tasks(ns).Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("error creating task: %s", err) } @@ -537,7 +549,7 @@ func TestRetryFailed(t *testing.T) { registry: true, kanikoTaskImage: "chains-test-tr-retryfailed", }, - getObject: getTaskRunObject, + getObject: getTaskRunObjectV1, }, { name: "pipelinerun", @@ -635,30 +647,61 @@ var imageTaskRun = v1beta1.TaskRun{ } func getTaskRunObject(ns string) objects.TektonObject { - o := objects.NewTaskRunObject(&imageTaskRun) + trV1 := &v1.TaskRun{} + imageTaskRun.ConvertTo(context.Background(), trV1) + o := objects.NewTaskRunObjectV1(trV1) + o.Namespace = ns + return o +} + +func getTaskRunObjectWithParams(ns string, params []v1.Param) objects.TektonObject { + trV1 := &v1.TaskRun{} + imageTaskRun.ConvertTo(context.Background(), trV1) + o := objects.NewTaskRunObjectV1(trV1) + o.Namespace = ns + o.Spec.Params = params + return o +} + +func taskRunFromFile(f string) (*v1.TaskRun, error) { + contents, err := os.ReadFile(f) + if err != nil { + return nil, err + } + var tr v1.TaskRun + if err := json.Unmarshal(contents, &tr); err != nil { + return nil, err + } + return &tr, nil +} + +func getTaskRunObjectV1(ns string) objects.TektonObject { + tr, _ := taskRunFromFile("testdata/type-hinting/taskrun.json") + o := objects.NewTaskRunObjectV1(tr) o.Namespace = ns return o } -func getTaskRunObjectWithParams(ns string, params []v1beta1.Param) objects.TektonObject { - o := objects.NewTaskRunObject(&imageTaskRun) +func getTaskRunObjectV1WithParams(ns string, params []v1.Param) objects.TektonObject { + tr, _ := taskRunFromFile("testdata/type-hinting/taskrun.json") + o := 
objects.NewTaskRunObjectV1(tr) o.Namespace = ns o.Spec.Params = params return o } -var imagePipelineRun = v1beta1.PipelineRun{ +var imagePipelineRun = v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "image-pipelinerun", Annotations: map[string]string{chains.RekorAnnotation: "true"}, }, - Spec: v1beta1.PipelineRunSpec{ - PipelineSpec: &v1beta1.PipelineSpec{ - Tasks: []v1beta1.PipelineTask{{ + Spec: v1.PipelineRunSpec{ + PipelineSpec: &v1.PipelineSpec{ + Tasks: []v1.PipelineTask{{ Name: "echo", - TaskSpec: &v1beta1.EmbeddedTask{ - TaskSpec: v1beta1.TaskSpec{ - Steps: []v1beta1.Step{ + TaskSpec: &v1.EmbeddedTask{ + TaskSpec: v1.TaskSpec{ + Steps: []v1.Step{ { Image: "busybox", Script: "echo success", @@ -672,13 +715,13 @@ var imagePipelineRun = v1beta1.PipelineRun{ } func getPipelineRunObject(ns string) objects.TektonObject { - o := objects.NewPipelineRunObject(&imagePipelineRun) + o := objects.NewPipelineRunObjectV1(&imagePipelineRun) o.Namespace = ns return o } -func getPipelineRunObjectWithParams(ns string, params []v1beta1.Param) objects.TektonObject { - o := objects.NewPipelineRunObject(&imagePipelineRun) +func getPipelineRunObjectWithParams(ns string, params []v1.Param) objects.TektonObject { + o := objects.NewPipelineRunObjectV1(&imagePipelineRun) o.Namespace = ns o.Spec.Params = params return o @@ -688,7 +731,7 @@ func TestProvenanceMaterials(t *testing.T) { tests := []struct { name string cm map[string]string - getObjectWithParams func(ns string, params []v1beta1.Param) objects.TektonObject + getObjectWithParams func(ns string, params []v1.Param) objects.TektonObject payloadKey string }{ { @@ -725,10 +768,10 @@ func TestProvenanceMaterials(t *testing.T) { commit := "my-git-commit" url := "https://my-git-url" - params := []v1beta1.Param{{ - Name: "CHAINS-GIT_COMMIT", Value: *v1beta1.NewStructuredValues(commit), + params := []v1.Param{{ + Name: "CHAINS-GIT_COMMIT", Value: *v1.NewStructuredValues(commit), }, { - Name: "CHAINS-GIT_URL", Value: 
*v1beta1.NewStructuredValues(url), + Name: "CHAINS-GIT_URL", Value: *v1.NewStructuredValues(url), }} obj := test.getObjectWithParams(ns, params) @@ -774,9 +817,9 @@ func TestProvenanceMaterials(t *testing.T) { } if test.name == "pipelinerun" { - pr := signedObj.GetObject().(*v1beta1.PipelineRun) + pr := signedObj.GetObject().(*v1.PipelineRun) for _, cr := range pr.Status.ChildReferences { - taskRun, err := c.PipelineClient.TektonV1beta1().TaskRuns(ns).Get(ctx, cr.Name, metav1.GetOptions{}) + taskRun, err := c.PipelineClient.TektonV1().TaskRuns(ns).Get(ctx, cr.Name, metav1.GetOptions{}) if err != nil { t.Errorf("Did not expect an error but got %v", err) } @@ -790,7 +833,7 @@ func TestProvenanceMaterials(t *testing.T) { } } } else { - tr := signedObj.GetObject().(*v1beta1.TaskRun) + tr := signedObj.GetObject().(*v1.TaskRun) for _, step := range tr.Status.Steps { want = append(want, provenance.ProvenanceMaterial{ URI: artifacts.OCIScheme + "" + strings.Split(step.ImageID, "@")[0], diff --git a/test/examples_test.go b/test/examples_test.go index dcce933ac2..7924101d6e 100644 --- a/test/examples_test.go +++ b/test/examples_test.go @@ -46,7 +46,7 @@ import ( "github.com/tektoncd/chains/pkg/chains/objects" "github.com/tektoncd/chains/pkg/test/tekton" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" "sigs.k8s.io/yaml" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -250,10 +250,10 @@ func (v *verifier) Public() crypto.PublicKey { func expectedProvenanceSLSA1(t *testing.T, ctx context.Context, example string, obj objects.TektonObject, outputLocation string, ns string, c *clients) intoto.ProvenanceStatementSLSA1 { switch obj.(type) { - case *objects.TaskRunObject: + case *objects.TaskRunObjectV1: f := expectedTaskRunProvenanceFormat(t, example, obj, outputLocation) return expectedAttestationSLSA1(t, example, f, outputLocation) - case *objects.PipelineRunObject: + case *objects.PipelineRunObjectV1: f := 
expectedPipelineRunProvenanceFormat(t, ctx, example, obj, outputLocation, ns, c) return expectedAttestationSLSA1(t, example, f, outputLocation) default: @@ -264,10 +264,10 @@ func expectedProvenanceSLSA1(t *testing.T, ctx context.Context, example string, func expectedProvenance(t *testing.T, ctx context.Context, example string, obj objects.TektonObject, outputLocation string, ns string, c *clients) intoto.ProvenanceStatement { switch obj.(type) { - case *objects.TaskRunObject: + case *objects.TaskRunObjectV1: f := expectedTaskRunProvenanceFormat(t, example, obj, outputLocation) return expectedAttestation(t, example, f, outputLocation) - case *objects.PipelineRunObject: + case *objects.PipelineRunObjectV1: f := expectedPipelineRunProvenanceFormat(t, ctx, example, obj, outputLocation, ns, c) return expectedAttestation(t, example, f, outputLocation) default: @@ -294,7 +294,7 @@ type Format struct { } func expectedTaskRunProvenanceFormat(t *testing.T, example string, obj objects.TektonObject, outputLocation string) Format { - tr := obj.GetObject().(*v1beta1.TaskRun) + tr := obj.GetObject().(*v1.TaskRun) name := tr.Name if tr.Spec.TaskRef != nil { @@ -330,7 +330,7 @@ func expectedTaskRunProvenanceFormat(t *testing.T, example string, obj objects.T } func expectedPipelineRunProvenanceFormat(t *testing.T, ctx context.Context, example string, obj objects.TektonObject, outputLocation string, ns string, c *clients) Format { - pr := obj.GetObject().(*v1beta1.PipelineRun) + pr := obj.GetObject().(*v1.PipelineRun) buildStartTimes := []string{} buildFinishedTimes := []string{} @@ -338,7 +338,7 @@ func expectedPipelineRunProvenanceFormat(t *testing.T, ctx context.Context, exam uriDigestSet := make(map[string]bool) for _, cr := range pr.Status.ChildReferences { - taskRun, err := c.PipelineClient.TektonV1beta1().TaskRuns(ns).Get(ctx, cr.Name, metav1.GetOptions{}) + taskRun, err := c.PipelineClient.TektonV1().TaskRuns(ns).Get(ctx, cr.Name, metav1.GetOptions{}) if err != nil { 
t.Errorf("Did not expect an error but got %v", err) } @@ -465,12 +465,12 @@ func taskRunFromExample(t *testing.T, ns, example string) objects.TektonObject { if err != nil { t.Fatal(err) } - var tr *v1beta1.TaskRun + var tr *v1.TaskRun if err := yaml.Unmarshal(contents, &tr); err != nil { t.Fatal(err) } tr.Namespace = ns - return objects.NewTaskRunObject(tr) + return objects.NewTaskRunObjectV1(tr) } func pipelineRunFromExample(t *testing.T, ns, example string) objects.TektonObject { @@ -478,12 +478,12 @@ func pipelineRunFromExample(t *testing.T, ns, example string) objects.TektonObje if err != nil { t.Fatal(err) } - var pr *v1beta1.PipelineRun + var pr *v1.PipelineRun if err := yaml.Unmarshal(contents, &pr); err != nil { t.Fatal(err) } pr.Namespace = ns - return objects.NewPipelineRunObject(pr) + return objects.NewPipelineRunObjectV1(pr) } func ignoreEnvironmentAnnotationsAndLabels(key string, value any) bool { diff --git a/test/kaniko.go b/test/kaniko.go index 401b1cdde0..4d9591aeae 100644 --- a/test/kaniko.go +++ b/test/kaniko.go @@ -23,76 +23,76 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/tektoncd/chains/pkg/chains" "github.com/tektoncd/chains/pkg/chains/objects" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" - v1 "k8s.io/api/core/v1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const taskName = "kaniko-task" func kanikoPipelineRun(ns string) objects.TektonObject { - imagePipelineRun := v1beta1.PipelineRun{ + imagePipelineRun := v1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "image-pipelinerun", Namespace: ns, Annotations: map[string]string{chains.RekorAnnotation: "true"}, }, - Spec: v1beta1.PipelineRunSpec{ - PipelineSpec: &v1beta1.PipelineSpec{ - Tasks: []v1beta1.PipelineTask{{ + Spec: v1.PipelineRunSpec{ + PipelineSpec: &v1.PipelineSpec{ + Tasks: []v1.PipelineTask{{ Name: "kaniko", - TaskRef: &v1beta1.TaskRef{ + 
TaskRef: &v1.TaskRef{ Name: "kaniko-task", - Kind: v1beta1.NamespacedTaskKind, + Kind: v1.NamespacedTaskKind, }, }}, - Results: []v1beta1.PipelineResult{{ + Results: []v1.PipelineResult{{ Name: "IMAGE_URL", - Value: *v1beta1.NewStructuredValues("$(tasks.kaniko.results.IMAGE_URL)"), + Value: *v1.NewStructuredValues("$(tasks.kaniko.results.IMAGE_URL)"), }, { Name: "IMAGE_DIGEST", - Value: *v1beta1.NewStructuredValues("$(tasks.kaniko.results.IMAGE_DIGEST)"), + Value: *v1.NewStructuredValues("$(tasks.kaniko.results.IMAGE_DIGEST)"), }}, }, }, } - return objects.NewPipelineRunObject(&imagePipelineRun) + return objects.NewPipelineRunObjectV1(&imagePipelineRun) } func kanikoTaskRun(namespace string) objects.TektonObject { - tr := &v1beta1.TaskRun{ + tr := &v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "kaniko-taskrun", Namespace: namespace, }, - Spec: v1beta1.TaskRunSpec{ - TaskRef: &v1beta1.TaskRef{ + Spec: v1.TaskRunSpec{ + TaskRef: &v1.TaskRef{ Name: taskName, }, }, } - return objects.NewTaskRunObject(tr) + return objects.NewTaskRunObjectV1(tr) } -func kanikoTask(t *testing.T, namespace, destinationImage string) *v1beta1.Task { +func kanikoTask(t *testing.T, namespace, destinationImage string) *v1.Task { ref, err := name.ParseReference(destinationImage) if err != nil { t.Fatalf("unable to parse image name: %v", err) } - return &v1beta1.Task{ + return &v1.Task{ ObjectMeta: metav1.ObjectMeta{ Name: taskName, Namespace: namespace, }, - Spec: v1beta1.TaskSpec{ - Results: []v1beta1.TaskResult{ + Spec: v1.TaskSpec{ + Results: []v1.TaskResult{ {Name: "IMAGE_URL"}, {Name: "IMAGE_DIGEST"}, }, - Steps: []v1beta1.Step{{ + Steps: []v1.Step{{ Name: "create-dockerfile", Image: "bash:latest", - VolumeMounts: []v1.VolumeMount{{ + VolumeMounts: []corev1.VolumeMount{{ Name: "dockerfile", MountPath: "/dockerfile", }}, @@ -109,23 +109,23 @@ func kanikoTask(t *testing.T, namespace, destinationImage string) *v1beta1.Task // Need this to push the image to the insecure registry 
"--insecure", }, - VolumeMounts: []v1.VolumeMount{{ + VolumeMounts: []corev1.VolumeMount{{ Name: "dockerfile", MountPath: "/dockerfile", }}, }, { Name: "save-image-url", Image: "bash:latest", - VolumeMounts: []v1.VolumeMount{{ + VolumeMounts: []corev1.VolumeMount{{ Name: "dockerfile", MountPath: "/dockerfile", }}, Script: fmt.Sprintf("#!/usr/bin/env bash\necho %s | tee $(results.IMAGE_URL.path)", ref.String()), }, }, - Volumes: []v1.Volume{{ + Volumes: []corev1.Volume{{ Name: "dockerfile", - VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}, + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, }}, }, } @@ -144,14 +144,14 @@ cosign verify --allow-insecure-registry --key cosign.pub %s cosign verify-attestation --allow-insecure-registry --key cosign.pub %s` script = fmt.Sprintf(script, publicKey, destinationImage, destinationImage) - return objects.NewTaskRunObject(&v1beta1.TaskRun{ + return objects.NewTaskRunObjectV1(&v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "verify-kaniko-taskrun", Namespace: namespace, }, - Spec: v1beta1.TaskRunSpec{ - TaskSpec: &v1beta1.TaskSpec{ - Steps: []v1beta1.Step{{ + Spec: v1.TaskRunSpec{ + TaskSpec: &v1.TaskSpec{ + Steps: []v1.Step{{ Name: "verify-image", Image: "gcr.io/projectsigstore/cosign/ci/cosign:d764e8b89934dc1043bd1b13112a66641c63a038@sha256:228c37f9f37415efbd6a4ff16aae81197206ce1410a227bcab8ac8b039b36237", Script: script, diff --git a/test/test_utils.go b/test/test_utils.go index 6d322a7fde..392fe978ad 100644 --- a/test/test_utils.go +++ b/test/test_utils.go @@ -35,7 +35,7 @@ import ( chainsstorage "github.com/tektoncd/chains/pkg/chains/storage" "github.com/tektoncd/chains/pkg/config" "github.com/tektoncd/chains/pkg/test/tekton" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" pipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" @@ -44,9 +44,9 @@ import ( "k8s.io/client-go/kubernetes" ) -func getTr(ctx context.Context, t *testing.T, c pipelineclientset.Interface, name, ns string) (tr *v1beta1.TaskRun) { +func getTr(ctx context.Context, t *testing.T, c pipelineclientset.Interface, name, ns string) (tr *v1.TaskRun) { t.Helper() - tr, err := c.TektonV1beta1().TaskRuns(ns).Get(ctx, name, metav1.GetOptions{}) + tr, err := c.TektonV1().TaskRuns(ns).Get(ctx, name, metav1.GetOptions{}) if err != nil { t.Error(err) } @@ -116,16 +116,16 @@ func signed(obj objects.TektonObject) bool { return ok } -var simpleTaskspec = v1beta1.TaskSpec{ - Steps: []v1beta1.Step{{ +var simpleTaskspec = v1.TaskSpec{ + Steps: []v1.Step{{ Image: "busybox", Script: "echo true", }}, } -var simpleTaskRun = v1beta1.TaskRun{ +var simpleTaskRun = v1.TaskRun{ ObjectMeta: metav1.ObjectMeta{GenerateName: "test-task-"}, - Spec: v1beta1.TaskRunSpec{TaskSpec: &simpleTaskspec}, + Spec: v1.TaskRunSpec{TaskSpec: &simpleTaskspec}, } func makeBucket(t *testing.T, client *storage.Client) (string, func()) { @@ -216,11 +216,11 @@ func printDebugging(t *testing.T, obj objects.TektonObject) { kind := obj.GetObjectKind().GroupVersionKind().Kind t.Logf("============================== %s logs ==============================", obj.GetGVK()) - output, _ := exec.Command("tkn", kind, "logs", "-n", obj.GetNamespace(), obj.GetName()).CombinedOutput() + output, _ := exec.Command("tkn", strings.ToLower(kind), "logs", "-n", obj.GetNamespace(), obj.GetName()).CombinedOutput() t.Log(string(output)) t.Logf("============================== %s describe ==============================", obj.GetGVK()) - output, _ = exec.Command("tkn", kind, "describe", "-n", obj.GetNamespace(), obj.GetName()).CombinedOutput() + output, _ = exec.Command("tkn", strings.ToLower(kind), "describe", "-n", obj.GetNamespace(), obj.GetName()).CombinedOutput() t.Log(string(output)) t.Log("============================== chains controller logs 
==============================") @@ -251,10 +251,16 @@ func verifySignature(ctx context.Context, t *testing.T, c *clients, obj objects. var configuredBackends []string var key string switch obj.GetObject().(type) { - case *objects.TaskRunObject: + case *objects.TaskRunObjectV1: configuredBackends = sets.List[string](cfg.Artifacts.TaskRuns.StorageBackend) key = fmt.Sprintf("taskrun-%s", obj.GetUID()) - case *objects.PipelineRunObject: + case *objects.PipelineRunObjectV1: + configuredBackends = sets.List[string](cfg.Artifacts.PipelineRuns.StorageBackend) + key = fmt.Sprintf("pipelinerun-%s", obj.GetUID()) + case *objects.TaskRunObjectV1Beta1: + configuredBackends = sets.List[string](cfg.Artifacts.TaskRuns.StorageBackend) + key = fmt.Sprintf("taskrun-%s", obj.GetUID()) + case *objects.PipelineRunObjectV1Beta1: configuredBackends = sets.List[string](cfg.Artifacts.PipelineRuns.StorageBackend) key = fmt.Sprintf("pipelinerun-%s", obj.GetUID()) } diff --git a/test/testdata/slsa/v2alpha2/pipeline-output-image.json b/test/testdata/slsa/v2alpha2/pipeline-output-image.json index b7da1c6273..67a67f4f2c 100644 --- a/test/testdata/slsa/v2alpha2/pipeline-output-image.json +++ b/test/testdata/slsa/v2alpha2/pipeline-output-image.json @@ -77,7 +77,9 @@ } ] }, - "timeout": "1h0m0s" + "timeouts": { + "pipeline": "1h0m0s" + } } }, "resolvedDependencies": [ diff --git a/test/testdata/type-hinting/taskrun.json b/test/testdata/type-hinting/taskrun.json new file mode 100644 index 0000000000..655878c215 --- /dev/null +++ b/test/testdata/type-hinting/taskrun.json @@ -0,0 +1,33 @@ +{ + "apiVersion": "tekton.dev/v1", + "kind": "TaskRun", + "metadata": { + "name": "image-build", + "annotations": { + "chains.tekton.dev/rekor": "true" + } + }, + "spec": { + "taskSpec": { + "results": [ + { + "name": "first-image-IMAGE_URL", + "type": "string", + "description": "The precise URL of the OCI image built." 
+ }, + { + "name": "first-image-IMAGE_DIGEST", + "type": "string", + "description": "The algorithm and digest of the OCI image built." + } + ], + "steps": [ + { + "name": "dummy-build", + "image": "bash:latest", + "script": "#!/usr/bin/env bash\necho -n \"gcr.io/foo/bar\" | tee $(results.first-image-IMAGE_URL.path)\necho -n \"sha256:586789aa031fafc7d78a5393cdc772e0b55107ea54bb8bcf3f2cdac6c6da51ee\" | tee $(results.first-image-IMAGE_DIGEST.path)\n" + } + ] + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-logr/logr/slogr/sloghandler.go b/vendor/github.com/go-logr/logr/slogr/sloghandler.go new file mode 100644 index 0000000000..ec6725ce2c --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/sloghandler.go @@ -0,0 +1,168 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +type slogHandler struct { + // May be nil, in which case all logs get discarded. + sink logr.LogSink + // Non-nil if sink is non-nil and implements SlogSink. + slogSink SlogSink + + // groupPrefix collects values from WithGroup calls. It gets added as + // prefix to value keys when handling a log record. + groupPrefix string + + // levelBias can be set when constructing the handler to influence the + // slog.Level of log records. A positive levelBias reduces the + // slog.Level value. 
slog has no API to influence this value after the + // handler got created, so it can only be set indirectly through + // Logger.V. + levelBias slog.Level +} + +var _ slog.Handler = &slogHandler{} + +// groupSeparator is used to concatenate WithGroup names and attribute keys. +const groupSeparator = "." + +// GetLevel is used for black box unit testing. +func (l *slogHandler) GetLevel() slog.Level { + return l.levelBias +} + +func (l *slogHandler) Enabled(ctx context.Context, level slog.Level) bool { + return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level))) +} + +func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error { + if l.slogSink != nil { + // Only adjust verbosity level of log entries < slog.LevelError. + if record.Level < slog.LevelError { + record.Level -= l.levelBias + } + return l.slogSink.Handle(ctx, record) + } + + // No need to check for nil sink here because Handle will only be called + // when Enabled returned true. + + kvList := make([]any, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + return true + }) + if record.Level >= slog.LevelError { + l.sinkWithCallDepth().Error(nil, record.Message, kvList...) + } else { + level := l.levelFromSlog(record.Level) + l.sinkWithCallDepth().Info(level, record.Message, kvList...) + } + return nil +} + +// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info +// are called by Handle, code in slog gets skipped. +// +// This offset currently (Go 1.21.0) works for calls through +// slog.New(NewSlogHandler(...)). There's no guarantee that the call +// chain won't change. Wrapping the handler will also break unwinding. It's +// still better than not adjusting at all.... +// +// This cannot be done when constructing the handler because NewLogr needs +// access to the original sink without this adjustment. 
A second copy would +// work, but then WithAttrs would have to be called for both of them. +func (l *slogHandler) sinkWithCallDepth() logr.LogSink { + if sink, ok := l.sink.(logr.CallDepthLogSink); ok { + return sink.WithCallDepth(2) + } + return l.sink +} + +func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + if l.sink == nil || len(attrs) == 0 { + return l + } + + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithAttrs(attrs) + copy.sink = copy.slogSink + } else { + kvList := make([]any, 0, 2*len(attrs)) + for _, attr := range attrs { + if attr.Key != "" { + kvList = append(kvList, l.addGroupPrefix(attr.Key), attr.Value.Resolve().Any()) + } + } + copy.sink = l.sink.WithValues(kvList...) + } + return &copy +} + +func (l *slogHandler) WithGroup(name string) slog.Handler { + if l.sink == nil { + return l + } + copy := *l + if l.slogSink != nil { + copy.slogSink = l.slogSink.WithGroup(name) + copy.sink = l.slogSink + } else { + copy.groupPrefix = copy.addGroupPrefix(name) + } + return &copy +} + +func (l *slogHandler) addGroupPrefix(name string) string { + if l.groupPrefix == "" { + return name + } + return l.groupPrefix + groupSeparator + name +} + +// levelFromSlog adjusts the level by the logger's verbosity and negates it. +// It ensures that the result is >= 0. This is necessary because the result is +// passed to a logr.LogSink and that API did not historically document whether +// levels could be negative or what that meant. 
+// +// Some example usage: +// logrV0 := getMyLogger() +// logrV2 := logrV0.V(2) +// slogV2 := slog.New(slogr.NewSlogHandler(logrV2)) +// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6) +// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2) +// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0) +func (l *slogHandler) levelFromSlog(level slog.Level) int { + result := -level + result += l.levelBias // in case the original logr.Logger had a V level + if result < 0 { + result = 0 // because logr.LogSink doesn't expect negative V levels + } + return int(result) +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogr.go b/vendor/github.com/go-logr/logr/slogr/slogr.go new file mode 100644 index 0000000000..eb519ae23f --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogr.go @@ -0,0 +1,108 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package slogr enables usage of a slog.Handler with logr.Logger as front-end +// API and of a logr.LogSink through the slog.Handler and thus slog.Logger +// APIs. +// +// See the README in the top-level [./logr] package for a discussion of +// interoperability. +package slogr + +import ( + "context" + "log/slog" + + "github.com/go-logr/logr" +) + +// NewLogr returns a logr.Logger which writes to the slog.Handler. +// +// The logr verbosity level is mapped to slog levels such that V(0) becomes +// slog.LevelInfo and V(4) becomes slog.LevelDebug. 
+func NewLogr(handler slog.Handler) logr.Logger { + if handler, ok := handler.(*slogHandler); ok { + if handler.sink == nil { + return logr.Discard() + } + return logr.New(handler.sink).V(int(handler.levelBias)) + } + return logr.New(&slogSink{handler: handler}) +} + +// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger. +// +// The returned logger writes all records with level >= slog.LevelError as +// error log entries with LogSink.Error, regardless of the verbosity level of +// the logr.Logger: +// +// logger := +// slog.New(NewSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...) +// +// The level of all other records gets reduced by the verbosity +// level of the logr.Logger and the result is negated. If it happens +// to be negative, then it gets replaced by zero because a LogSink +// is not expected to handled negative levels: +// +// slog.New(NewSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...) +// slog.New(NewSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...) +// slog.New(NewSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...) +func NewSlogHandler(logger logr.Logger) slog.Handler { + if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 { + return sink.handler + } + + handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())} + if slogSink, ok := handler.sink.(SlogSink); ok { + handler.slogSink = slogSink + } + return handler +} + +// SlogSink is an optional interface that a LogSink can implement to support +// logging through the slog.Logger or slog.Handler APIs better. It then should +// also support special slog values like slog.Group. 
When used as a +// slog.Handler, the advantages are: +// +// - stack unwinding gets avoided in favor of logging the pre-recorded PC, +// as intended by slog +// - proper grouping of key/value pairs via WithGroup +// - verbosity levels > slog.LevelInfo can be recorded +// - less overhead +// +// Both APIs (logr.Logger and slog.Logger/Handler) then are supported equally +// well. Developers can pick whatever API suits them better and/or mix +// packages which use either API in the same binary with a common logging +// implementation. +// +// This interface is necessary because the type implementing the LogSink +// interface cannot also implement the slog.Handler interface due to the +// different prototype of the common Enabled method. +// +// An implementation could support both interfaces in two different types, but then +// additional interfaces would be needed to convert between those types in NewLogr +// and NewSlogHandler. +type SlogSink interface { + logr.LogSink + + Handle(ctx context.Context, record slog.Record) error + WithAttrs(attrs []slog.Attr) SlogSink + WithGroup(name string) SlogSink +} diff --git a/vendor/github.com/go-logr/logr/slogr/slogsink.go b/vendor/github.com/go-logr/logr/slogr/slogsink.go new file mode 100644 index 0000000000..6fbac561d9 --- /dev/null +++ b/vendor/github.com/go-logr/logr/slogr/slogsink.go @@ -0,0 +1,122 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package slogr + +import ( + "context" + "log/slog" + "runtime" + "time" + + "github.com/go-logr/logr" +) + +var ( + _ logr.LogSink = &slogSink{} + _ logr.CallDepthLogSink = &slogSink{} + _ Underlier = &slogSink{} +) + +// Underlier is implemented by the LogSink returned by NewLogr. +type Underlier interface { + // GetUnderlying returns the Handler used by the LogSink. + GetUnderlying() slog.Handler +} + +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" + + // errKey is used to log the error parameter of Error as an additional attribute. + errKey = "err" +) + +type slogSink struct { + callDepth int + name string + handler slog.Handler +} + +func (l *slogSink) Init(info logr.RuntimeInfo) { + l.callDepth = info.CallDepth +} + +func (l *slogSink) GetUnderlying() slog.Handler { + return l.handler +} + +func (l *slogSink) WithCallDepth(depth int) logr.LogSink { + newLogger := *l + newLogger.callDepth += depth + return &newLogger +} + +func (l *slogSink) Enabled(level int) bool { + return l.handler.Enabled(context.Background(), slog.Level(-level)) +} + +func (l *slogSink) Info(level int, msg string, kvList ...interface{}) { + l.log(nil, msg, slog.Level(-level), kvList...) +} + +func (l *slogSink) Error(err error, msg string, kvList ...interface{}) { + l.log(err, msg, slog.LevelError, kvList...) +} + +func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) { + var pcs [1]uintptr + // skip runtime.Callers, this function, Info/Error, and all helper functions above that. + runtime.Callers(3+l.callDepth, pcs[:]) + + record := slog.NewRecord(time.Now(), level, msg, pcs[0]) + if l.name != "" { + record.AddAttrs(slog.String(nameKey, l.name)) + } + if err != nil { + record.AddAttrs(slog.Any(errKey, err)) + } + record.Add(kvList...) 
+ l.handler.Handle(context.Background(), record) +} + +func (l slogSink) WithName(name string) logr.LogSink { + if l.name != "" { + l.name = l.name + "/" + } + l.name += name + return &l +} + +func (l slogSink) WithValues(kvList ...interface{}) logr.LogSink { + l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...)) + return &l +} + +func kvListToAttrs(kvList ...interface{}) []slog.Attr { + // We don't need the record itself, only its Add method. + record := slog.NewRecord(time.Time{}, 0, "", 0) + record.Add(kvList...) + attrs := make([]slog.Attr, 0, record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + attrs = append(attrs, attr) + return true + }) + return attrs +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go index 2dc3b884f7..9c000c673e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/pipeline_validation.go @@ -94,12 +94,6 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { // have "enable-api-fields" set to "alpha" or "beta". 
func (ps *PipelineSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { var errs *apis.FieldError - // Object parameters - for i, p := range ps.Params { - if p.Type == ParamTypeObject { - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields).ViaFieldIndex("params", i)) - } - } // Indexing into array parameters arrayParamIndexingRefs := ps.GetIndexingReferencesToArrayParams() if len(arrayParamIndexingRefs) != 0 { @@ -109,7 +103,7 @@ func (ps *PipelineSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError for i, result := range ps.Results { switch result.Type { case ResultsTypeObject: - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object results", config.BetaAPIFields).ViaFieldIndex("results", i)) + // stable feature case ResultsTypeArray: errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "array results", config.BetaAPIFields).ViaFieldIndex("results", i)) case ResultsTypeString: diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go index 38b316dc7a..23f7178645 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/task_validation.go @@ -102,12 +102,6 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { // have "enable-api-fields" set to "alpha" or "beta". 
func (ts *TaskSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { var errs *apis.FieldError - // Object parameters - for i, p := range ts.Params { - if p.Type == ParamTypeObject { - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields).ViaFieldIndex("params", i)) - } - } // Indexing into array parameters arrayIndexParamRefs := ts.GetIndexingReferencesToArrayParams() if len(arrayIndexParamRefs) != 0 { @@ -117,7 +111,7 @@ func (ts *TaskSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { for i, result := range ts.Results { switch result.Type { case ResultsTypeObject: - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object results", config.BetaAPIFields).ViaFieldIndex("results", i)) + // stable feature case ResultsTypeArray: errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "array results", config.BetaAPIFields).ViaFieldIndex("results", i)) case ResultsTypeString: diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go index b9495138fc..e9bf1c17f6 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1/taskrun_validation.go @@ -237,11 +237,6 @@ func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs func ValidateParameters(ctx context.Context, params Params) (errs *apis.FieldError) { var names []string for _, p := range params { - if p.Value.Type == ParamTypeObject { - // Object type parameter is a beta feature and will fail validation if it's used in a taskrun spec - // when the enable-api-fields feature gate is not "alpha" or "beta". 
- errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) - } names = append(names, p.Name) } return errs.Also(validateNoDuplicateNames(names, false)) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go index 7ee9ba354e..69d39e0954 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go @@ -97,12 +97,6 @@ func (ps *PipelineSpec) Validate(ctx context.Context) (errs *apis.FieldError) { // `enable-api-fields` but does not have "enable-api-fields" set to "alpha" or "beta". func (ps *PipelineSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { var errs *apis.FieldError - // Object parameters - for i, p := range ps.Params { - if p.Type == ParamTypeObject { - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields).ViaFieldIndex("params", i)) - } - } // Indexing into array parameters arrayParamIndexingRefs := ps.GetIndexingReferencesToArrayParams() if len(arrayParamIndexingRefs) != 0 { @@ -112,7 +106,7 @@ func (ps *PipelineSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError for i, result := range ps.Results { switch result.Type { case ResultsTypeObject: - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object results", config.BetaAPIFields).ViaFieldIndex("results", i)) + // stable feature case ResultsTypeArray: errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "array results", config.BetaAPIFields).ViaFieldIndex("results", i)) case ResultsTypeString: diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go index 95ddbc1f6d..c9a5e5a9ec 100644 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go @@ -105,12 +105,6 @@ func (ts *TaskSpec) Validate(ctx context.Context) (errs *apis.FieldError) { // `enable-api-fields` but does not have "enable-api-fields" set to "alpha" or "beta". func (ts *TaskSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { var errs *apis.FieldError - // Object parameters - for i, p := range ts.Params { - if p.Type == ParamTypeObject { - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields).ViaFieldIndex("params", i)) - } - } // Indexing into array parameters arrayIndexParamRefs := ts.GetIndexingReferencesToArrayParams() if len(arrayIndexParamRefs) != 0 { @@ -120,7 +114,7 @@ func (ts *TaskSpec) ValidateBetaFields(ctx context.Context) *apis.FieldError { for i, result := range ts.Results { switch result.Type { case ResultsTypeObject: - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object results", config.BetaAPIFields).ViaFieldIndex("results", i)) + // stable feature case ResultsTypeArray: errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "array results", config.BetaAPIFields).ViaFieldIndex("results", i)) case ResultsTypeString: diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go index f2e847c612..de2243f25a 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go @@ -29,6 +29,7 @@ import ( const ( cloudEventsAnnotationKey = "tekton.dev/v1beta1CloudEvents" resourcesResultAnnotationKey = "tekton.dev/v1beta1ResourcesResult" + resourcesStatusAnnotationKey = "tekton.dev/v1beta1ResourcesStatus" ) var _ apis.Convertible = (*TaskRun)(nil) @@ -41,12 
+42,18 @@ func (tr *TaskRun) ConvertTo(ctx context.Context, to apis.Convertible) error { switch sink := to.(type) { case *v1.TaskRun: sink.ObjectMeta = tr.ObjectMeta + if err := serializeTaskRunResources(&sink.ObjectMeta, &tr.Spec); err != nil { + return err + } if err := serializeTaskRunCloudEvents(&sink.ObjectMeta, &tr.Status); err != nil { return err } if err := serializeTaskRunResourcesResult(&sink.ObjectMeta, &tr.Status); err != nil { return err } + if err := serializeTaskRunResourcesStatus(&sink.ObjectMeta, &tr.Status); err != nil { + return err + } if err := tr.Status.ConvertTo(ctx, &sink.Status, &sink.ObjectMeta); err != nil { return err } @@ -115,6 +122,9 @@ func (tr *TaskRun) ConvertFrom(ctx context.Context, from apis.Convertible) error switch source := from.(type) { case *v1.TaskRun: tr.ObjectMeta = source.ObjectMeta + if err := deserializeTaskRunResources(&tr.ObjectMeta, &tr.Spec); err != nil { + return err + } if err := deserializeTaskRunCloudEvents(&tr.ObjectMeta, &tr.Status); err != nil { return err } @@ -124,6 +134,9 @@ func (tr *TaskRun) ConvertFrom(ctx context.Context, from apis.Convertible) error if err := tr.Status.ConvertFrom(ctx, source.Status, &tr.ObjectMeta); err != nil { return err } + if err := deserializeTaskRunResourcesStatus(&tr.ObjectMeta, &tr.Status); err != nil { + return err + } return tr.Spec.ConvertFrom(ctx, &source.Spec, &tr.ObjectMeta) default: return fmt.Errorf("unknown version, got: %T", tr) @@ -381,6 +394,25 @@ func (ss *SidecarState) convertFrom(ctx context.Context, source v1.SidecarState) ss.ImageID = source.ImageID } +func serializeTaskRunResources(meta *metav1.ObjectMeta, spec *TaskRunSpec) error { + if spec.Resources == nil { + return nil + } + return version.SerializeToMetadata(meta, spec.Resources, resourcesAnnotationKey) +} + +func deserializeTaskRunResources(meta *metav1.ObjectMeta, spec *TaskRunSpec) error { + resources := &TaskRunResources{} + err := version.DeserializeFromMetadata(meta, resources, 
resourcesAnnotationKey) + if err != nil { + return err + } + if resources.Inputs != nil || resources.Outputs != nil { + spec.Resources = resources + } + return nil +} + func serializeTaskRunCloudEvents(meta *metav1.ObjectMeta, status *TaskRunStatus) error { if status.CloudEvents == nil { return nil @@ -418,3 +450,28 @@ func deserializeTaskRunResourcesResult(meta *metav1.ObjectMeta, status *TaskRunS } return nil } + +func serializeTaskRunResourcesStatus(meta *metav1.ObjectMeta, status *TaskRunStatus) error { + if status.TaskSpec == nil { + return nil + } + if status.TaskSpec.Resources == nil { + return nil + } + return version.SerializeToMetadata(meta, status.TaskSpec.Resources, resourcesStatusAnnotationKey) +} + +func deserializeTaskRunResourcesStatus(meta *metav1.ObjectMeta, status *TaskRunStatus) error { + resourcesStatus := &TaskResources{} + err := version.DeserializeFromMetadata(meta, resourcesStatus, resourcesStatusAnnotationKey) + if err != nil { + return err + } + if resourcesStatus.Inputs != nil || resourcesStatus.Outputs != nil { + if status.TaskRunStatusFields.TaskSpec == nil { + status.TaskSpec = &TaskSpec{} + } + status.TaskSpec.Resources = resourcesStatus + } + return nil +} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go index 0baf7dfc7a..e5108a6464 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go @@ -239,11 +239,6 @@ func ValidateWorkspaceBindings(ctx context.Context, wb []WorkspaceBinding) (errs func ValidateParameters(ctx context.Context, params Params) (errs *apis.FieldError) { var names []string for _, p := range params { - if p.Value.Type == ParamTypeObject { - // Object type parameter is a beta feature and will fail validation if it's used in a taskrun spec - // when the 
enable-api-fields feature gate is not "alpha" or "beta". - errs = errs.Also(config.ValidateEnabledAPIFields(ctx, "object type parameter", config.BetaAPIFields)) - } names = append(names, p.Name) } return errs.Also(validateNoDuplicateNames(names, false)) diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/fake/fake.go deleted file mode 100644 index 2b97a8ec5d..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/fake/fake.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by injection-gen. DO NOT EDIT. 
- -package fake - -import ( - context "context" - - fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake" - pipelinerun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" -) - -var Get = pipelinerun.Get - -func init() { - injection.Fake.RegisterInformer(withInformer) -} - -func withInformer(ctx context.Context) (context.Context, controller.Informer) { - f := fake.Get(ctx) - inf := f.Tekton().V1beta1().PipelineRuns() - return context.WithValue(ctx, pipelinerun.Key{}, inf), inf.Informer() -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/pipelinerun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/pipelinerun.go deleted file mode 100644 index 29efdfecd4..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/pipelinerun.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by injection-gen. DO NOT EDIT. 
- -package pipelinerun - -import ( - context "context" - - v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" - factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" - logging "knative.dev/pkg/logging" -) - -func init() { - injection.Default.RegisterInformer(withInformer) -} - -// Key is used for associating the Informer inside the context.Context. -type Key struct{} - -func withInformer(ctx context.Context) (context.Context, controller.Informer) { - f := factory.Get(ctx) - inf := f.Tekton().V1beta1().PipelineRuns() - return context.WithValue(ctx, Key{}, inf), inf.Informer() -} - -// Get extracts the typed informer from the context. -func Get(ctx context.Context) v1beta1.PipelineRunInformer { - untyped := ctx.Value(Key{}) - if untyped == nil { - logging.FromContext(ctx).Panic( - "Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.PipelineRunInformer from context.") - } - return untyped.(v1beta1.PipelineRunInformer) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake/fake.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake/fake.go deleted file mode 100644 index 79919b4612..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake/fake.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by injection-gen. DO NOT EDIT. - -package fake - -import ( - context "context" - - fake "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory/fake" - taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" -) - -var Get = taskrun.Get - -func init() { - injection.Fake.RegisterInformer(withInformer) -} - -func withInformer(ctx context.Context) (context.Context, controller.Informer) { - f := fake.Get(ctx) - inf := f.Tekton().V1beta1().TaskRuns() - return context.WithValue(ctx, taskrun.Key{}, inf), inf.Informer() -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/taskrun.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/taskrun.go deleted file mode 100644 index 7cab8456f7..0000000000 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/taskrun.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2020 The Tekton Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by injection-gen. DO NOT EDIT. - -package taskrun - -import ( - context "context" - - v1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" - factory "github.com/tektoncd/pipeline/pkg/client/injection/informers/factory" - controller "knative.dev/pkg/controller" - injection "knative.dev/pkg/injection" - logging "knative.dev/pkg/logging" -) - -func init() { - injection.Default.RegisterInformer(withInformer) -} - -// Key is used for associating the Informer inside the context.Context. -type Key struct{} - -func withInformer(ctx context.Context) (context.Context, controller.Informer) { - f := factory.Get(ctx) - inf := f.Tekton().V1beta1().TaskRuns() - return context.WithValue(ctx, Key{}, inf), inf.Informer() -} - -// Get extracts the typed informer from the context. -func Get(ctx context.Context) v1beta1.TaskRunInformer { - untyped := ctx.Value(Key{}) - if untyped == nil { - logging.FromContext(ctx).Panic( - "Unable to fetch github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1.TaskRunInformer from context.") - } - return untyped.(v1beta1.TaskRunInformer) -} diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/controller.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/controller.go similarity index 99% rename from vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/controller.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/controller.go index f93cc4afdc..b1efcea654 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/controller.go +++ 
b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/controller.go @@ -26,7 +26,7 @@ import ( versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" client "github.com/tektoncd/pipeline/pkg/client/injection/client" - pipelinerun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun" + pipelinerun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/pipelinerun" zap "go.uber.org/zap" corev1 "k8s.io/api/core/v1" labels "k8s.io/apimachinery/pkg/labels" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/reconciler.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/reconciler.go similarity index 86% rename from vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/reconciler.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/reconciler.go index 219d8418e9..f49825001b 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/reconciler.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/reconciler.go @@ -23,12 +23,12 @@ import ( json "encoding/json" fmt "fmt" - v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1" + pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1" zap "go.uber.org/zap" "go.uber.org/zap/zapcore" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" equality "k8s.io/apimachinery/pkg/api/equality" errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ 
-43,40 +43,40 @@ import ( ) // Interface defines the strongly typed interfaces to be implemented by a -// controller reconciling v1beta1.PipelineRun. +// controller reconciling v1.PipelineRun. type Interface interface { - // ReconcileKind implements custom logic to reconcile v1beta1.PipelineRun. Any changes + // ReconcileKind implements custom logic to reconcile v1.PipelineRun. Any changes // to the objects .Status or .Finalizers will be propagated to the stored // object. It is recommended that implementors do not call any update calls // for the Kind inside of ReconcileKind, it is the responsibility of the calling // controller to propagate those properties. The resource passed to ReconcileKind // will always have an empty deletion timestamp. - ReconcileKind(ctx context.Context, o *v1beta1.PipelineRun) reconciler.Event + ReconcileKind(ctx context.Context, o *v1.PipelineRun) reconciler.Event } // Finalizer defines the strongly typed interfaces to be implemented by a -// controller finalizing v1beta1.PipelineRun. +// controller finalizing v1.PipelineRun. type Finalizer interface { - // FinalizeKind implements custom logic to finalize v1beta1.PipelineRun. Any changes + // FinalizeKind implements custom logic to finalize v1.PipelineRun. Any changes // to the objects .Status or .Finalizers will be ignored. Returning a nil or // Normal type reconciler.Event will allow the finalizer to be deleted on // the resource. The resource passed to FinalizeKind will always have a set // deletion timestamp. - FinalizeKind(ctx context.Context, o *v1beta1.PipelineRun) reconciler.Event + FinalizeKind(ctx context.Context, o *v1.PipelineRun) reconciler.Event } // ReadOnlyInterface defines the strongly typed interfaces to be implemented by a -// controller reconciling v1beta1.PipelineRun if they want to process resources for which +// controller reconciling v1.PipelineRun if they want to process resources for which // they are not the leader. 
type ReadOnlyInterface interface { - // ObserveKind implements logic to observe v1beta1.PipelineRun. + // ObserveKind implements logic to observe v1.PipelineRun. // This method should not write to the API. - ObserveKind(ctx context.Context, o *v1beta1.PipelineRun) reconciler.Event + ObserveKind(ctx context.Context, o *v1.PipelineRun) reconciler.Event } -type doReconcile func(ctx context.Context, o *v1beta1.PipelineRun) reconciler.Event +type doReconcile func(ctx context.Context, o *v1.PipelineRun) reconciler.Event -// reconcilerImpl implements controller.Reconciler for v1beta1.PipelineRun resources. +// reconcilerImpl implements controller.Reconciler for v1.PipelineRun resources. type reconcilerImpl struct { // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs @@ -85,7 +85,7 @@ type reconcilerImpl struct { Client versioned.Interface // Listers index properties about resources. - Lister pipelinev1beta1.PipelineRunLister + Lister pipelinev1.PipelineRunLister // Recorder is an event recorder for recording Event resources to the // Kubernetes API. @@ -112,7 +112,7 @@ var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) -func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.PipelineRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { +func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1.PipelineRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { // Check the options function input. It should be 0 or 1. 
if len(options) > 1 { logger.Fatal("Up to one options struct is supported, found: ", len(options)) @@ -267,7 +267,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { default: if err = r.updateStatus(ctx, logger, original, resource); err != nil { logger.Warnw("Failed to update resource status", zap.Error(err)) - r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed", + r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", resource.Name, err) return err } @@ -293,7 +293,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // This is a wrapped error, don't emit an event. } else { logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error()) } return reconcileEvent } @@ -301,13 +301,13 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { return nil } -func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1beta1.PipelineRun, desired *v1beta1.PipelineRun) error { +func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1.PipelineRun, desired *v1.PipelineRun) error { existing = existing.DeepCopy() return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. 
if attempts > 0 { - getter := r.Client.TektonV1beta1().PipelineRuns(desired.Namespace) + getter := r.Client.TektonV1().PipelineRuns(desired.Namespace) existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{}) if err != nil { @@ -328,7 +328,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLo existing.Status = desired.Status - updater := r.Client.TektonV1beta1().PipelineRuns(existing.Namespace) + updater := r.Client.TektonV1().PipelineRuns(existing.Namespace) _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{}) return err @@ -338,7 +338,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLo // updateFinalizersFiltered will update the Finalizers of the resource. // TODO: this method could be generic and sync all finalizers. For now it only // updates defaultFinalizerName or its override. -func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.PipelineRun, desiredFinalizers sets.String) (*v1beta1.PipelineRun, error) { +func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.PipelineRun, desiredFinalizers sets.String) (*v1.PipelineRun, error) { // Don't modify the informers copy. 
existing := resource.DeepCopy() @@ -376,21 +376,21 @@ func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource return resource, err } - patcher := r.Client.TektonV1beta1().PipelineRuns(resource.Namespace) + patcher := r.Client.TektonV1().PipelineRuns(resource.Namespace) resourceName := resource.Name updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { - r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed", + r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed", "Failed to update finalizers for %q: %v", resourceName, err) } else { - r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate", + r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate", "Updated %q finalizers", resource.GetName()) } return updated, err } -func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.PipelineRun) (*v1beta1.PipelineRun, error) { +func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.PipelineRun) (*v1.PipelineRun, error) { if _, ok := r.reconciler.(Finalizer); !ok { return resource, nil } @@ -406,7 +406,7 @@ func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource * return r.updateFinalizersFiltered(ctx, resource, finalizers) } -func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.PipelineRun, reconcileEvent reconciler.Event) (*v1beta1.PipelineRun, error) { +func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.PipelineRun, reconcileEvent reconciler.Event) (*v1.PipelineRun, error) { if _, ok := r.reconciler.(Finalizer); !ok { return resource, nil } @@ -419,7 +419,7 @@ func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.P if reconcileEvent != nil { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { - if event.EventType == v1.EventTypeNormal { + 
if event.EventType == corev1.EventTypeNormal { finalizers.Delete(r.finalizerName) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/state.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/state.go similarity index 94% rename from vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/state.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/state.go index 39399186d9..35540fcf4e 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun/state.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun/state.go @@ -21,7 +21,7 @@ package pipelinerun import ( fmt "fmt" - v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" types "k8s.io/apimachinery/pkg/types" cache "k8s.io/client-go/tools/cache" reconciler "knative.dev/pkg/reconciler" @@ -83,7 +83,7 @@ func (s *state) isNotLeaderNorObserver() bool { return false } -func (s *state) reconcileMethodFor(o *v1beta1.PipelineRun) (string, doReconcile) { +func (s *state) reconcileMethodFor(o *v1.PipelineRun) (string, doReconcile) { if o.GetDeletionTimestamp().IsZero() { if s.isLeader { return reconciler.DoReconcileKind, s.reconciler.ReconcileKind diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/controller.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/controller.go similarity index 99% rename from vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/controller.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/controller.go index c8dea030ba..2cf0767987 100644 --- 
a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/controller.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/controller.go @@ -26,7 +26,7 @@ import ( versionedscheme "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme" client "github.com/tektoncd/pipeline/pkg/client/injection/client" - taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun" + taskrun "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1/taskrun" zap "go.uber.org/zap" corev1 "k8s.io/api/core/v1" labels "k8s.io/apimachinery/pkg/labels" diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/reconciler.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/reconciler.go similarity index 86% rename from vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/reconciler.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/reconciler.go index ab6a96f5ae..30a208a556 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/reconciler.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/reconciler.go @@ -23,12 +23,12 @@ import ( json "encoding/json" fmt "fmt" - v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" versioned "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" - pipelinev1beta1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1" + pipelinev1 "github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1" zap "go.uber.org/zap" "go.uber.org/zap/zapcore" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" equality "k8s.io/apimachinery/pkg/api/equality" errors 
"k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,40 +43,40 @@ import ( ) // Interface defines the strongly typed interfaces to be implemented by a -// controller reconciling v1beta1.TaskRun. +// controller reconciling v1.TaskRun. type Interface interface { - // ReconcileKind implements custom logic to reconcile v1beta1.TaskRun. Any changes + // ReconcileKind implements custom logic to reconcile v1.TaskRun. Any changes // to the objects .Status or .Finalizers will be propagated to the stored // object. It is recommended that implementors do not call any update calls // for the Kind inside of ReconcileKind, it is the responsibility of the calling // controller to propagate those properties. The resource passed to ReconcileKind // will always have an empty deletion timestamp. - ReconcileKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event + ReconcileKind(ctx context.Context, o *v1.TaskRun) reconciler.Event } // Finalizer defines the strongly typed interfaces to be implemented by a -// controller finalizing v1beta1.TaskRun. +// controller finalizing v1.TaskRun. type Finalizer interface { - // FinalizeKind implements custom logic to finalize v1beta1.TaskRun. Any changes + // FinalizeKind implements custom logic to finalize v1.TaskRun. Any changes // to the objects .Status or .Finalizers will be ignored. Returning a nil or // Normal type reconciler.Event will allow the finalizer to be deleted on // the resource. The resource passed to FinalizeKind will always have a set // deletion timestamp. - FinalizeKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event + FinalizeKind(ctx context.Context, o *v1.TaskRun) reconciler.Event } // ReadOnlyInterface defines the strongly typed interfaces to be implemented by a -// controller reconciling v1beta1.TaskRun if they want to process resources for which +// controller reconciling v1.TaskRun if they want to process resources for which // they are not the leader. 
type ReadOnlyInterface interface { - // ObserveKind implements logic to observe v1beta1.TaskRun. + // ObserveKind implements logic to observe v1.TaskRun. // This method should not write to the API. - ObserveKind(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event + ObserveKind(ctx context.Context, o *v1.TaskRun) reconciler.Event } -type doReconcile func(ctx context.Context, o *v1beta1.TaskRun) reconciler.Event +type doReconcile func(ctx context.Context, o *v1.TaskRun) reconciler.Event -// reconcilerImpl implements controller.Reconciler for v1beta1.TaskRun resources. +// reconcilerImpl implements controller.Reconciler for v1.TaskRun resources. type reconcilerImpl struct { // LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware. reconciler.LeaderAwareFuncs @@ -85,7 +85,7 @@ type reconcilerImpl struct { Client versioned.Interface // Listers index properties about resources. - Lister pipelinev1beta1.TaskRunLister + Lister pipelinev1.TaskRunLister // Recorder is an event recorder for recording Event resources to the // Kubernetes API. @@ -112,7 +112,7 @@ var _ controller.Reconciler = (*reconcilerImpl)(nil) // Check that our generated Reconciler is always LeaderAware. var _ reconciler.LeaderAware = (*reconcilerImpl)(nil) -func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1beta1.TaskRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { +func NewReconciler(ctx context.Context, logger *zap.SugaredLogger, client versioned.Interface, lister pipelinev1.TaskRunLister, recorder record.EventRecorder, r Interface, options ...controller.Options) controller.Reconciler { // Check the options function input. It should be 0 or 1. 
if len(options) > 1 { logger.Fatal("Up to one options struct is supported, found: ", len(options)) @@ -267,7 +267,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { default: if err = r.updateStatus(ctx, logger, original, resource); err != nil { logger.Warnw("Failed to update resource status", zap.Error(err)) - r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed", + r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed", "Failed to update status for %q: %v", resource.Name, err) return err } @@ -293,7 +293,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { // This is a wrapped error, don't emit an event. } else { logger.Errorw("Returned an error", zap.Error(reconcileEvent)) - r.Recorder.Event(resource, v1.EventTypeWarning, "InternalError", reconcileEvent.Error()) + r.Recorder.Event(resource, corev1.EventTypeWarning, "InternalError", reconcileEvent.Error()) } return reconcileEvent } @@ -301,13 +301,13 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error { return nil } -func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1beta1.TaskRun, desired *v1beta1.TaskRun) error { +func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLogger, existing *v1.TaskRun, desired *v1.TaskRun) error { existing = existing.DeepCopy() return reconciler.RetryUpdateConflicts(func(attempts int) (err error) { // The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API. 
if attempts > 0 { - getter := r.Client.TektonV1beta1().TaskRuns(desired.Namespace) + getter := r.Client.TektonV1().TaskRuns(desired.Namespace) existing, err = getter.Get(ctx, desired.Name, metav1.GetOptions{}) if err != nil { @@ -328,7 +328,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLo existing.Status = desired.Status - updater := r.Client.TektonV1beta1().TaskRuns(existing.Namespace) + updater := r.Client.TektonV1().TaskRuns(existing.Namespace) _, err = updater.UpdateStatus(ctx, existing, metav1.UpdateOptions{}) return err @@ -338,7 +338,7 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, logger *zap.SugaredLo // updateFinalizersFiltered will update the Finalizers of the resource. // TODO: this method could be generic and sync all finalizers. For now it only // updates defaultFinalizerName or its override. -func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1beta1.TaskRun, desiredFinalizers sets.String) (*v1beta1.TaskRun, error) { +func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource *v1.TaskRun, desiredFinalizers sets.String) (*v1.TaskRun, error) { // Don't modify the informers copy. 
existing := resource.DeepCopy() @@ -376,21 +376,21 @@ func (r *reconcilerImpl) updateFinalizersFiltered(ctx context.Context, resource return resource, err } - patcher := r.Client.TektonV1beta1().TaskRuns(resource.Namespace) + patcher := r.Client.TektonV1().TaskRuns(resource.Namespace) resourceName := resource.Name updated, err := patcher.Patch(ctx, resourceName, types.MergePatchType, patch, metav1.PatchOptions{}) if err != nil { - r.Recorder.Eventf(existing, v1.EventTypeWarning, "FinalizerUpdateFailed", + r.Recorder.Eventf(existing, corev1.EventTypeWarning, "FinalizerUpdateFailed", "Failed to update finalizers for %q: %v", resourceName, err) } else { - r.Recorder.Eventf(updated, v1.EventTypeNormal, "FinalizerUpdate", + r.Recorder.Eventf(updated, corev1.EventTypeNormal, "FinalizerUpdate", "Updated %q finalizers", resource.GetName()) } return updated, err } -func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1beta1.TaskRun) (*v1beta1.TaskRun, error) { +func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource *v1.TaskRun) (*v1.TaskRun, error) { if _, ok := r.reconciler.(Finalizer); !ok { return resource, nil } @@ -406,7 +406,7 @@ func (r *reconcilerImpl) setFinalizerIfFinalizer(ctx context.Context, resource * return r.updateFinalizersFiltered(ctx, resource, finalizers) } -func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.TaskRun, reconcileEvent reconciler.Event) (*v1beta1.TaskRun, error) { +func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1.TaskRun, reconcileEvent reconciler.Event) (*v1.TaskRun, error) { if _, ok := r.reconciler.(Finalizer); !ok { return resource, nil } @@ -419,7 +419,7 @@ func (r *reconcilerImpl) clearFinalizer(ctx context.Context, resource *v1beta1.T if reconcileEvent != nil { var event *reconciler.ReconcilerEvent if reconciler.EventAs(reconcileEvent, &event) { - if event.EventType == v1.EventTypeNormal { + if event.EventType == 
corev1.EventTypeNormal { finalizers.Delete(r.finalizerName) } } diff --git a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/state.go b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/state.go similarity index 95% rename from vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/state.go rename to vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/state.go index 5f021a41ec..b989b339ba 100644 --- a/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun/state.go +++ b/vendor/github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun/state.go @@ -21,7 +21,7 @@ package taskrun import ( fmt "fmt" - v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + v1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1" types "k8s.io/apimachinery/pkg/types" cache "k8s.io/client-go/tools/cache" reconciler "knative.dev/pkg/reconciler" @@ -83,7 +83,7 @@ func (s *state) isNotLeaderNorObserver() bool { return false } -func (s *state) reconcileMethodFor(o *v1beta1.TaskRun) (string, doReconcile) { +func (s *state) reconcileMethodFor(o *v1.TaskRun) (string, doReconcile) { if o.GetDeletionTimestamp().IsZero() { if s.isLeader { return reconciler.DoReconcileKind, s.reconciler.ReconcileKind diff --git a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go index f309ee2b39..196c491bbb 100644 --- a/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go +++ b/vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go @@ -388,6 +388,9 @@ func (r *Registry) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) { // If no encoder is found, an error of type ErrNoEncoder is returned. 
LookupEncoder is safe for // concurrent use by multiple goroutines after all codecs and encoders are registered. func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { + if valueType == nil { + return nil, ErrNoEncoder{Type: valueType} + } enc, found := r.lookupTypeEncoder(valueType) if found { if enc == nil { @@ -400,15 +403,10 @@ func (r *Registry) LookupEncoder(valueType reflect.Type) (ValueEncoder, error) { if found { return r.typeEncoders.LoadOrStore(valueType, enc), nil } - if valueType == nil { - r.storeTypeEncoder(valueType, nil) - return nil, ErrNoEncoder{Type: valueType} - } if v, ok := r.kindEncoders.Load(valueType.Kind()); ok { return r.storeTypeEncoder(valueType, v), nil } - r.storeTypeEncoder(valueType, nil) return nil, ErrNoEncoder{Type: valueType} } @@ -474,7 +472,6 @@ func (r *Registry) LookupDecoder(valueType reflect.Type) (ValueDecoder, error) { if v, ok := r.kindDecoders.Load(valueType.Kind()); ok { return r.storeTypeDecoder(valueType, v), nil } - r.storeTypeDecoder(valueType, nil) return nil, ErrNoDecoder{Type: valueType} } diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/database.go b/vendor/go.mongodb.org/mongo-driver/mongo/database.go index f5d5ad379b..6760f0d014 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/database.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/database.go @@ -185,18 +185,21 @@ func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, op = operation.NewCommand(runCmdDoc) } - // TODO(GODRIVER-2649): ReadConcern(db.readConcern) will not actually pass the database's - // read concern. Remove this note once readConcern is correctly passed to the operation - // level. return op.Session(sess).CommandMonitor(db.client.monitor). ServerSelector(readSelect).ClusterClock(db.client.clock). - Database(db.name).Deployment(db.client.deployment).ReadConcern(db.readConcern). + Database(db.name).Deployment(db.client.deployment). 
Crypt(db.client.cryptFLE).ReadPreference(ro.ReadPreference).ServerAPI(db.client.serverAPI). Timeout(db.client.timeout).Logger(db.client.logger), sess, nil } -// RunCommand executes the given command against the database. This function does not obey the Database's read -// preference. To specify a read preference, the RunCmdOptions.ReadPreference option must be used. +// RunCommand executes the given command against the database. +// +// This function does not obey the Database's readPreference. To specify a read +// preference, the RunCmdOptions.ReadPreference option must be used. +// +// This function does not obey the Database's readConcern or writeConcern. A +// user must supply these values manually in the user-provided runCommand +// parameter. // // The runCommand parameter must be a document for the command to be executed. It cannot be nil. // This must be an order-preserving type such as bson.D. Map types such as bson.M are not valid. diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go index 41a93a2145..8d3555d0b0 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/index_view.go @@ -86,6 +86,9 @@ func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOption description.LatencySelector(iv.coll.client.localThreshold), }) selector = makeReadPrefSelector(sess, selector, iv.coll.client.localThreshold) + + // TODO(GODRIVER-3038): This operation should pass CSE to the ListIndexes + // Crypt setter to be applied to the operation. op := operation.NewListIndexes(). Session(sess).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). @@ -251,6 +254,10 @@ func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts .. option := options.MergeCreateIndexesOptions(opts...) 
+ // TODO(GODRIVER-3038): This operation should pass CSE to the CreateIndexes + // Crypt setter to be applied to the operation. + // + // This was added in GODRIVER-2413 for the 2.0 major release. op := operation.NewCreateIndexes(indexes). Session(sess).WriteConcern(wc).ClusterClock(iv.coll.client.clock). Database(iv.coll.db.name).Collection(iv.coll.name).CommandMonitor(iv.coll.client.monitor). @@ -387,6 +394,9 @@ func (iv IndexView) drop(ctx context.Context, name string, opts ...*options.Drop selector := makePinnedSelector(sess, iv.coll.writeSelector) dio := options.MergeDropIndexesOptions(opts...) + + // TODO(GODRIVER-3038): This operation should pass CSE to the DropIndexes + // Crypt setter to be applied to the operation. op := operation.NewDropIndexes(name). Session(sess).WriteConcern(wc).CommandMonitor(iv.coll.client.monitor). ServerSelector(selector).ClusterClock(iv.coll.client.clock). diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go index 04fda6d779..7904dbd672 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/collectionoptions.go @@ -95,6 +95,9 @@ func MergeCollectionOptions(opts ...*CollectionOptions) *CollectionOptions { if opt.Registry != nil { c.Registry = opt.Registry } + if opt.BSONOptions != nil { + c.BSONOptions = opt.BSONOptions + } } return c diff --git a/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go b/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go index 8a380d2168..38ee13550b 100644 --- a/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go +++ b/vendor/go.mongodb.org/mongo-driver/mongo/options/dboptions.go @@ -95,6 +95,9 @@ func MergeDatabaseOptions(opts ...*DatabaseOptions) *DatabaseOptions { if opt.Registry != nil { d.Registry = opt.Registry } + if opt.BSONOptions != nil { + d.BSONOptions = opt.BSONOptions + } } 
return d diff --git a/vendor/go.mongodb.org/mongo-driver/version/version.go b/vendor/go.mongodb.org/mongo-driver/version/version.go index 4bd1f9b5e3..738d44e6aa 100644 --- a/vendor/go.mongodb.org/mongo-driver/version/version.go +++ b/vendor/go.mongodb.org/mongo-driver/version/version.go @@ -8,4 +8,4 @@ package version // import "go.mongodb.org/mongo-driver/version" // Driver is the current version of the driver. -var Driver = "v1.13.0" +var Driver = "v1.13.1" diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go index 5aad3f72e6..35283794a3 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/command.go @@ -14,7 +14,6 @@ import ( "go.mongodb.org/mongo-driver/event" "go.mongodb.org/mongo-driver/internal/logger" "go.mongodb.org/mongo-driver/mongo/description" - "go.mongodb.org/mongo-driver/mongo/readconcern" "go.mongodb.org/mongo-driver/mongo/readpref" "go.mongodb.org/mongo-driver/x/bsonx/bsoncore" "go.mongodb.org/mongo-driver/x/mongo/driver" @@ -24,7 +23,6 @@ import ( // Command is used to run a generic operation. type Command struct { command bsoncore.Document - readConcern *readconcern.ReadConcern database string deployment driver.Deployment selector description.ServerSelector @@ -79,7 +77,6 @@ func (c *Command) Execute(ctx context.Context) error { return errors.New("the Command operation must have a Deployment set before Execute can be called") } - // TODO(GODRIVER-2649): Actually pass readConcern to underlying driver.Operation. return driver.Operation{ CommandFn: func(dst []byte, desc description.SelectedServer) ([]byte, error) { return append(dst, c.command[4:len(c.command)-1]...), nil @@ -163,16 +160,6 @@ func (c *Command) Deployment(deployment driver.Deployment) *Command { return c } -// ReadConcern specifies the read concern for this operation. 
-func (c *Command) ReadConcern(readConcern *readconcern.ReadConcern) *Command { - if c == nil { - c = new(Command) - } - - c.readConcern = readConcern - return c -} - // ReadPreference set the read preference used with this operation. func (c *Command) ReadPreference(readPreference *readpref.ReadPref) *Command { if c == nil { diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go index 6e750fd034..16f2ebf6c0 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/operation/hello.go @@ -530,7 +530,7 @@ func (h *Hello) handshakeCommand(dst []byte, desc description.SelectedServer) ([ func (h *Hello) command(dst []byte, desc description.SelectedServer) ([]byte, error) { // Use "hello" if topology is LoadBalanced, API version is declared or server // has responded with "helloOk". Otherwise, use legacy hello. - if desc.Kind == description.LoadBalanced || h.serverAPI != nil || desc.Server.HelloOK { + if h.loadBalanced || h.serverAPI != nil || desc.Server.HelloOK { dst = bsoncore.AppendInt32Element(dst, "hello", 1) } else { dst = bsoncore.AppendInt32Element(dst, handshake.LegacyHello, 1) @@ -575,8 +575,8 @@ func (h *Hello) StreamResponse(ctx context.Context, conn driver.StreamerConnecti // loadBalanced is False. 
If this is the case, then the drivers MUST use legacy // hello for the first message of the initial handshake with the OP_QUERY // protocol -func isLegacyHandshake(srvAPI *driver.ServerAPIOptions, deployment driver.Deployment) bool { - return srvAPI == nil && deployment.Kind() != description.LoadBalanced +func isLegacyHandshake(srvAPI *driver.ServerAPIOptions, loadbalanced bool) bool { + return srvAPI == nil && !loadbalanced } func (h *Hello) createOperation() driver.Operation { @@ -592,7 +592,7 @@ func (h *Hello) createOperation() driver.Operation { ServerAPI: h.serverAPI, } - if isLegacyHandshake(h.serverAPI, h.d) { + if isLegacyHandshake(h.serverAPI, h.loadBalanced) { op.Legacy = driver.LegacyHandshake } @@ -616,7 +616,7 @@ func (h *Hello) GetHandshakeInformation(ctx context.Context, _ address.Address, ServerAPI: h.serverAPI, } - if isLegacyHandshake(h.serverAPI, deployment) { + if isLegacyHandshake(h.serverAPI, h.loadBalanced) { op.Legacy = driver.LegacyHandshake } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go index af25b1f68c..ac78c12045 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/connection.go @@ -314,7 +314,7 @@ func transformNetworkError(ctx context.Context, originalError error, contextDead } // If there was an error and the context was cancelled, we assume it happened due to the cancellation. - if ctx.Err() == context.Canceled { + if errors.Is(ctx.Err(), context.Canceled) { return context.Canceled } @@ -858,7 +858,7 @@ func newCancellListener() *cancellListener { // Listen blocks until the provided context is cancelled or listening is aborted // via the StopListening function. If this detects that the context has been -// cancelled (i.e. ctx.Err() == context.Canceled), the provided callback is +// cancelled (i.e. 
errors.Is(ctx.Err(), context.Canceled), the provided callback is // called to abort in-progress work. Even if the context expires, this function // will block until StopListening is called. func (c *cancellListener) Listen(ctx context.Context, abortFn func()) { @@ -866,7 +866,7 @@ func (c *cancellListener) Listen(ctx context.Context, abortFn func()) { select { case <-ctx.Done(): - if ctx.Err() == context.Canceled { + if errors.Is(ctx.Err(), context.Canceled) { c.aborted = true abortFn() } diff --git a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go index 5823d3d7ae..1a9ee28241 100644 --- a/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go +++ b/vendor/go.mongodb.org/mongo-driver/x/mongo/driver/topology/server.go @@ -525,7 +525,7 @@ func (s *Server) ProcessError(err error, conn driver.Connection) driver.ProcessE if netErr, ok := wrappedConnErr.(net.Error); ok && netErr.Timeout() { return driver.NoChange } - if wrappedConnErr == context.Canceled || wrappedConnErr == context.DeadlineExceeded { + if errors.Is(wrappedConnErr, context.Canceled) || errors.Is(wrappedConnErr, context.DeadlineExceeded) { return driver.NoChange } @@ -625,7 +625,7 @@ func (s *Server) update() { // Retry after the first timeout before clearing the pool in case of a FAAS pause as // described in GODRIVER-2577. if err := unwrapConnectionError(desc.LastError); err != nil && timeoutCnt < 1 { - if err == context.Canceled || err == context.DeadlineExceeded { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { timeoutCnt++ // We want to immediately retry on timeout error. Continue to next loop. 
return true diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go index 60c8209de0..cbdf2eeb83 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/conditions.go @@ -22,14 +22,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SetStatusCondition sets the corresponding condition in conditions to newCondition. +// SetStatusCondition sets the corresponding condition in conditions to newCondition and returns true +// if the conditions are changed by this call. // conditions must be non-nil. // 1. if the condition of the specified type already exists (all fields of the existing condition are updated to // newCondition, LastTransitionTime is set to now if the new status differs from the old status) // 2. if a condition of the specified type does not exist (LastTransitionTime is set to now() if unset, and newCondition is appended) -func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { +func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) (changed bool) { if conditions == nil { - return + return false } existingCondition := FindStatusCondition(*conditions, newCondition.Type) if existingCondition == nil { @@ -37,7 +38,7 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond newCondition.LastTransitionTime = metav1.NewTime(time.Now()) } *conditions = append(*conditions, newCondition) - return + return true } if existingCondition.Status != newCondition.Status { @@ -47,18 +48,31 @@ func SetStatusCondition(conditions *[]metav1.Condition, newCondition metav1.Cond } else { existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) } + changed = true } - existingCondition.Reason = newCondition.Reason - existingCondition.Message = newCondition.Message - existingCondition.ObservedGeneration = newCondition.ObservedGeneration + if 
existingCondition.Reason != newCondition.Reason { + existingCondition.Reason = newCondition.Reason + changed = true + } + if existingCondition.Message != newCondition.Message { + existingCondition.Message = newCondition.Message + changed = true + } + if existingCondition.ObservedGeneration != newCondition.ObservedGeneration { + existingCondition.ObservedGeneration = newCondition.ObservedGeneration + changed = true + } + + return changed } -// RemoveStatusCondition removes the corresponding conditionType from conditions. +// RemoveStatusCondition removes the corresponding conditionType from conditions if present. Returns +// true if it was present and got removed. // conditions must be non-nil. -func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) { +func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) (removed bool) { if conditions == nil || len(*conditions) == 0 { - return + return false } newConditions := make([]metav1.Condition, 0, len(*conditions)-1) for _, condition := range *conditions { @@ -67,7 +81,10 @@ func RemoveStatusCondition(conditions *[]metav1.Condition, conditionType string) } } + removed = len(*conditions) != len(newConditions) *conditions = newConditions + + return removed } // FindStatusCondition finds the conditionType in conditions. diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go index a8866a43e1..2eebec667d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -203,6 +203,44 @@ func (a *int64Amount) Sub(b int64Amount) bool { return a.Add(int64Amount{value: -b.value, scale: b.scale}) } +// Mul multiplies the provided b to the current amount, or +// returns false if overflow or underflow would result. 
+func (a *int64Amount) Mul(b int64) bool { + switch { + case a.value == 0: + return true + case b == 0: + a.value = 0 + a.scale = 0 + return true + case a.scale == 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + a.value = c + case a.scale > 0: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = positiveScaleInt64(c, a.scale); !ok { + return false + } + a.value = c + default: + c, ok := int64Multiply(a.value, b) + if !ok { + return false + } + if _, ok = negativeScaleInt64(c, -a.scale); !ok { + return false + } + a.value = c + } + return true +} + // AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision // was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index b47d554b3c..69f1bc336d 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -592,6 +592,16 @@ func (q *Quantity) Sub(y Quantity) { q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) } +// Mul multiplies the provided y to the current value. +// It will return false if the result is inexact. Otherwise, it will return true. +func (q *Quantity) Mul(y int64) bool { + q.s = "" + if q.d.Dec == nil && q.i.Mul(y) { + return true + } + return q.ToDec().d.Dec.Mul(q.d.Dec, inf.NewDec(y, inf.Scale(0))).UnscaledBig().IsInt64() +} + // Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the // quantity is greater than y. 
func (q *Quantity) Cmp(y Quantity) int { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 2e33283ef2..0f58d66c09 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -173,7 +173,7 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s if str, ok := v.(string); ok { strMap[k] = str } else { - return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) + return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v) } } return strMap, true, nil diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 7bd1a3a6a5..f46a24cc6c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -257,3 +257,26 @@ func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersion } return obj, gvk, err } + +type encoderWithAllocator struct { + encoder EncoderWithAllocator + memAllocator MemoryAllocator +} + +// NewEncoderWithAllocator returns a new encoder +func NewEncoderWithAllocator(e EncoderWithAllocator, a MemoryAllocator) Encoder { + return &encoderWithAllocator{ + encoder: e, + memAllocator: a, + } +} + +// Encode writes the provided object to the nested writer +func (e *encoderWithAllocator) Encode(obj Object, w io.Writer) error { + return e.encoder.EncodeWithAllocator(obj, w, e.memAllocator) +} + +// Identifier returns identifier of this encoder. 
+func (e *encoderWithAllocator) Identifier() Identifier { + return e.encoder.Identifier() +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go index 87b3fec3f2..971c46d496 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -134,23 +134,3 @@ func (e *encoder) Encode(obj runtime.Object) error { e.buf.Reset() return err } - -type encoderWithAllocator struct { - writer io.Writer - encoder runtime.EncoderWithAllocator - memAllocator runtime.MemoryAllocator -} - -// NewEncoderWithAllocator returns a new streaming encoder -func NewEncoderWithAllocator(w io.Writer, e runtime.EncoderWithAllocator, a runtime.MemoryAllocator) Encoder { - return &encoderWithAllocator{ - writer: w, - encoder: e, - memAllocator: a, - } -} - -// Encode writes the provided object to the nested writer -func (e *encoderWithAllocator) Encode(obj runtime.Object) error { - return e.encoder.EncodeWithAllocator(obj, e.writer, e.memAllocator) -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go index 1328dd6120..ad486d580f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -136,6 +136,19 @@ func (c *LRUExpireCache) Remove(key interface{}) { delete(c.entries, key) } +// RemoveAll removes all keys that match predicate. +func (c *LRUExpireCache) RemoveAll(predicate func(key any) bool) { + c.lock.Lock() + defer c.lock.Unlock() + + for key, element := range c.entries { + if predicate(key) { + c.evictionList.Remove(element) + delete(c.entries, key) + } + } +} + // Keys returns all unexpired keys in the cache. 
// // Keep in mind that subsequent calls to Get() for any of the returned keys diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index 0ea88156be..f358c794d1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -72,14 +72,14 @@ func FromString(val string) IntOrString { return IntOrString{Type: String, StrVal: val} } -// Parse the given string and try to convert it to an integer before +// Parse the given string and try to convert it to an int32 integer before // setting it as a string value. func Parse(val string) IntOrString { - i, err := strconv.Atoi(val) + i, err := strconv.ParseInt(val, 10, 32) if err != nil { return FromString(val) } - return FromInt(i) + return FromInt32(int32(i)) } // UnmarshalJSON implements the json.Unmarshaller interface. diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go index 2112c9ab7e..786ad991c2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/structuredmerge.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/merge" + "sigs.k8s.io/structured-merge-diff/v4/typed" ) type structuredMergeManager struct { @@ -95,11 +96,11 @@ func (f *structuredMergeManager) Update(liveObj, newObj runtime.Object, managed if err != nil { return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } - newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned) + newObjTyped, err := f.typeConverter.ObjectToTyped(newObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to convert new object 
(%v) to smd typed: %v", objectGVKNN(newObjVersioned), err) } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to convert live object (%v) to smd typed: %v", objectGVKNN(liveObjVersioned), err) } @@ -139,11 +140,13 @@ func (f *structuredMergeManager) Apply(liveObj, patchObj runtime.Object, managed return nil, nil, fmt.Errorf("failed to convert live object (%v) to proper version: %v", objectGVKNN(liveObj), err) } + // Don't allow duplicates in the applied object. patchObjTyped, err := f.typeConverter.ObjectToTyped(patchObj) if err != nil { return nil, nil, fmt.Errorf("failed to create typed patch object (%v): %v", objectGVKNN(patchObj), err) } - liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned) + + liveObjTyped, err := f.typeConverter.ObjectToTyped(liveObjVersioned, typed.AllowDuplicates) if err != nil { return nil, nil, fmt.Errorf("failed to create typed live object (%v): %v", objectGVKNN(liveObjVersioned), err) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go index 1ac96d7f7b..c6449467cf 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go +++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/internal/typeconverter.go @@ -32,7 +32,7 @@ import ( // TypeConverter allows you to convert from runtime.Object to // typed.TypedValue and the other way around. 
type TypeConverter interface { - ObjectToTyped(runtime.Object) (*typed.TypedValue, error) + ObjectToTyped(runtime.Object, ...typed.ValidationOptions) (*typed.TypedValue, error) TypedToObject(*typed.TypedValue) (runtime.Object, error) } @@ -54,7 +54,7 @@ func NewTypeConverter(openapiSpec map[string]*spec.Schema, preserveUnknownFields return &typeConverter{parser: tr}, nil } -func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { +func (c *typeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { gvk := obj.GetObjectKind().GroupVersionKind() t := c.parser[gvk] if t == nil { @@ -62,9 +62,9 @@ func (c *typeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, er } switch o := obj.(type) { case *unstructured.Unstructured: - return t.FromUnstructured(o.UnstructuredContent()) + return t.FromUnstructured(o.UnstructuredContent(), opts...) default: - return t.FromStructured(obj) + return t.FromStructured(obj, opts...) } } @@ -84,12 +84,12 @@ func NewDeducedTypeConverter() TypeConverter { } // ObjectToTyped converts an object into a TypedValue with a "deduced type". -func (deducedTypeConverter) ObjectToTyped(obj runtime.Object) (*typed.TypedValue, error) { +func (deducedTypeConverter) ObjectToTyped(obj runtime.Object, opts ...typed.ValidationOptions) (*typed.TypedValue, error) { switch o := obj.(type) { case *unstructured.Unstructured: - return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent()) + return typed.DeducedParseableType.FromUnstructured(o.UnstructuredContent(), opts...) default: - return typed.DeducedParseableType.FromStructured(obj) + return typed.DeducedParseableType.FromStructured(obj, opts...) 
} } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go index df305b712c..85b0cfc072 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -20,12 +20,17 @@ import ( "errors" "fmt" "reflect" + "strings" "k8s.io/apimachinery/pkg/util/mergepatch" forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" openapi "k8s.io/kube-openapi/pkg/util/proto" + "k8s.io/kube-openapi/pkg/validation/spec" ) +const patchMergeKey = "x-kubernetes-patch-merge-key" +const patchStrategy = "x-kubernetes-patch-strategy" + type PatchMeta struct { patchStrategies []string patchMergeKey string @@ -148,6 +153,90 @@ func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { return t } +type PatchMetaFromOpenAPIV3 struct { + // SchemaList is required to resolve OpenAPI V3 references + SchemaList map[string]*spec.Schema + Schema *spec.Schema +} + +func (s PatchMetaFromOpenAPIV3) traverse(key string) (PatchMetaFromOpenAPIV3, error) { + if s.Schema == nil { + return PatchMetaFromOpenAPIV3{}, nil + } + if len(s.Schema.Properties) == 0 { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + subschema, ok := s.Schema.Properties[key] + if !ok { + return PatchMetaFromOpenAPIV3{}, fmt.Errorf("unable to find api field \"%s\"", key) + } + return PatchMetaFromOpenAPIV3{SchemaList: s.SchemaList, Schema: &subschema}, nil +} + +func resolve(l *PatchMetaFromOpenAPIV3) error { + if len(l.Schema.AllOf) > 0 { + l.Schema = &l.Schema.AllOf[0] + } + if refString := l.Schema.Ref.String(); refString != "" { + str := strings.TrimPrefix(refString, "#/components/schemas/") + sch, ok := l.SchemaList[str] + if ok { + l.Schema = sch + } else { + return fmt.Errorf("unable to resolve %s in OpenAPI V3", refString) + } + } + return nil +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForStruct(key string) 
(LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + l, err := s.traverse(key) + if err != nil { + return l, PatchMeta{}, err + } + p := PatchMeta{} + f, ok := l.Schema.Extensions[patchMergeKey] + if ok { + p.SetPatchMergeKey(f.(string)) + } + g, ok := l.Schema.Extensions[patchStrategy] + if ok { + p.SetPatchStrategies(strings.Split(g.(string), ",")) + } + if l.Schema.Items != nil { + l.Schema = l.Schema.Items.Schema + } + err = resolve(&l) + return l, p, err +} + +func (s PatchMetaFromOpenAPIV3) Name() string { + schema := s.Schema + if len(schema.Type) > 0 { + return strings.Join(schema.Type, "") + } + return "Struct" +} + type PatchMetaFromOpenAPI struct { Schema openapi.Schema } diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index ae73bda966..bc387d0116 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -200,12 +200,12 @@ func Invalid(field *Path, value interface{}, detail string) *Error { // NotSupported returns a *Error indicating "unsupported value". // This is used to report unknown values for enumerated fields (e.g. a list of // valid values). 
-func NotSupported(field *Path, value interface{}, validValues []string) *Error { +func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *Error { detail := "" if len(validValues) > 0 { quotedValues := make([]string, len(validValues)) for i, v := range validValues { - quotedValues[i] = strconv.Quote(v) + quotedValues[i] = strconv.Quote(fmt.Sprint(v)) } detail = "supported values: " + strings.Join(quotedValues, ", ") } diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index 4c61956953..2292ba1376 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -18,6 +18,7 @@ package version import ( "bytes" + "errors" "fmt" "regexp" "strconv" @@ -85,6 +86,47 @@ func parse(str string, semver bool) (*Version, error) { return v, nil } +// HighestSupportedVersion returns the highest supported version +// This function assumes that the highest supported version must be v1.x. 
+func HighestSupportedVersion(versions []string) (*Version, error) { + if len(versions) == 0 { + return nil, errors.New("empty array for supported versions") + } + + var ( + highestSupportedVersion *Version + theErr error + ) + + for i := len(versions) - 1; i >= 0; i-- { + currentHighestVer, err := ParseGeneric(versions[i]) + if err != nil { + theErr = err + continue + } + + if currentHighestVer.Major() > 1 { + continue + } + + if highestSupportedVersion == nil || highestSupportedVersion.LessThan(currentHighestVer) { + highestSupportedVersion = currentHighestVer + } + } + + if highestSupportedVersion == nil { + return nil, fmt.Errorf( + "could not find a highest supported version from versions (%v) reported: %+v", + versions, theErr) + } + + if highestSupportedVersion.Major() != 1 { + return nil, fmt.Errorf("highest supported version reported is %v, must be v1.x", highestSupportedVersion) + } + + return highestSupportedVersion, nil +} + // ParseGeneric parses a "generic" version string. 
The version string must consist of two // or more dot-separated numeric fields (the first of which can't have leading zeroes), // followed by arbitrary uninterpreted data (which need not be separated from the final diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go index 0dd13c626c..107bfc132f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -40,6 +40,10 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding var timeCh <-chan time.Time doneCh := ctx.Done() + if !sliding { + timeCh = t.C() + } + // if immediate is true the condition is // guaranteed to be executed at least once, // if we haven't requested immediate execution, delay once @@ -50,17 +54,27 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding }(); err != nil || ok { return err } - } else { + } + + if sliding { timeCh = t.C() + } + + for { + + // Wait for either the context to be cancelled or the next invocation be called select { case <-doneCh: return ctx.Err() case <-timeCh: } - } - for { - // checking ctx.Err() is slightly faster than checking a select + // IMPORTANT: Because there is no channel priority selection in golang + // it is possible for very short timers to "win" the race in the previous select + // repeatedly even when the context has been canceled. We therefore must + // explicitly check for context cancellation on every loop and exit if true to + // guarantee that we don't invoke condition more than once after context has + // been cancelled. 
if err := ctx.Err(); err != nil { return err } @@ -77,21 +91,5 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding if sliding { t.Next() } - - if timeCh == nil { - timeCh = t.C() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and doneCh, and t.C select falls through. - // In order to mitigate we re-check doneCh at the beginning - // of every loop to guarantee at-most one extra execution - // of condition. - select { - case <-doneCh: - return ctx.Err() - case <-timeCh: - } } } diff --git a/vendor/k8s.io/klog/v2/.golangci.yaml b/vendor/k8s.io/klog/v2/.golangci.yaml new file mode 100644 index 0000000000..0d77d65f06 --- /dev/null +++ b/vendor/k8s.io/klog/v2/.golangci.yaml @@ -0,0 +1,6 @@ +linters: + disable-all: true + enable: # sorted alphabetical + - gofmt + - misspell + - revive diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index f325ded5e9..46de00fb06 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -30,14 +30,16 @@ import ( var ( // Pid is inserted into log headers. Can be overridden for tests. Pid = os.Getpid() + + // Time, if set, will be used instead of the actual current time. + Time *time.Time ) // Buffer holds a single byte.Buffer for reuse. The zero value is ready for // use. It also provides some helper methods for output formatting. type Buffer struct { bytes.Buffer - Tmp [64]byte // temporary byte array for creating headers. - next *Buffer + Tmp [64]byte // temporary byte array for creating headers. } var buffers = sync.Pool{ @@ -122,6 +124,9 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. 
+ if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] @@ -157,6 +162,9 @@ func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. // It's worth about 3X. Fprintf is hard. + if Time != nil { + now = *Time + } _, month, day := now.Date() hour, minute, second := now.Clock() // Lmmdd hh:mm:ss.uuuuuu threadid file:line] diff --git a/vendor/k8s.io/klog/v2/internal/clock/clock.go b/vendor/k8s.io/klog/v2/internal/clock/clock.go index b8b6af5c81..cc11bb4802 100644 --- a/vendor/k8s.io/klog/v2/internal/clock/clock.go +++ b/vendor/k8s.io/klog/v2/internal/clock/clock.go @@ -39,16 +39,6 @@ type Clock interface { // Sleep sleeps for the provided duration d. // Consider making the sleep interruptible by using 'select' on a context channel and a timer channel. Sleep(d time.Duration) - // Tick returns the channel of a new Ticker. - // This method does not allow to free/GC the backing ticker. Use - // NewTicker from WithTicker instead. - Tick(d time.Duration) <-chan time.Time -} - -// WithTicker allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type WithTicker interface { - Clock // NewTicker returns a new Ticker. NewTicker(time.Duration) Ticker } @@ -66,7 +56,7 @@ type WithDelayedExecution interface { // WithTickerAndDelayedExecution allows for injecting fake or real clocks // into code that needs Ticker and AfterFunc functionality type WithTickerAndDelayedExecution interface { - WithTicker + Clock // AfterFunc executes f in its own goroutine after waiting // for d duration and returns a Timer whose channel can be // closed by calling Stop() on the Timer. 
@@ -79,7 +69,7 @@ type Ticker interface { Stop() } -var _ = WithTicker(RealClock{}) +var _ Clock = RealClock{} // RealClock really calls time.Now() type RealClock struct{} @@ -115,13 +105,6 @@ func (RealClock) AfterFunc(d time.Duration, f func()) Timer { } } -// Tick is the same as time.Tick(d) -// This method does not allow to free/GC the backing ticker. Use -// NewTicker instead. -func (RealClock) Tick(d time.Duration) <-chan time.Time { - return time.Tick(d) -} - // NewTicker returns a new Ticker. func (RealClock) NewTicker(d time.Duration) Ticker { return &realTicker{ diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index bcdf5f8ee1..d1a4751c94 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -172,73 +172,6 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { Formatter{}.KVListFormat(b, keysAndValues...) } -// KVFormat serializes one key/value pair into the provided buffer. -// A space gets inserted before the pair. -func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if sK, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(sK) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } - - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). 
- switch v := v.(type) { - case textWriter: - writeTextWriterValue(b, v) - case fmt.Stringer: - writeStringValue(b, StringerToString(v)) - case string: - writeStringValue(b, v) - case error: - writeStringValue(b, ErrorToString(v)) - case logr.Marshaler: - value := MarshalerToValue(v) - // A marshaler that returns a string is useful for - // delayed formatting of complex values. We treat this - // case like a normal string. This is useful for - // multi-line support. - // - // We could do this by recursively formatting a value, - // but that comes with the risk of infinite recursion - // if a marshaler returns itself. Instead we call it - // only once and rely on it returning the intended - // value directly. - switch value := value.(type) { - case string: - writeStringValue(b, value) - default: - f.formatAny(b, value) - } - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. 
- b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) - default: - f.formatAny(b, v) - } -} - func KVFormat(b *bytes.Buffer, k, v interface{}) { Formatter{}.KVFormat(b, k, v) } @@ -251,6 +184,10 @@ func (f Formatter) formatAny(b *bytes.Buffer, v interface{}) { b.WriteString(f.AnyToStringHook(v)) return } + formatAsJSON(b, v) +} + +func formatAsJSON(b *bytes.Buffer, v interface{}) { encoder := json.NewEncoder(b) l := b.Len() if err := encoder.Encode(v); err != nil { diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go new file mode 100644 index 0000000000..d9c7d15467 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_no_slog.go @@ -0,0 +1,97 @@ +//go:build !go1.21 +// +build !go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. 
+ + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". 
The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go new file mode 100644 index 0000000000..89acf97723 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues_slog.go @@ -0,0 +1,155 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serialize + +import ( + "bytes" + "fmt" + "log/slog" + "strconv" + + "github.com/go-logr/logr" +) + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + // This is the version without slog support. Must be kept in sync with + // the version in keyvalues_slog.go. 
+ + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + // + // slog.LogValuer does not need to be handled here because the handler will + // already have resolved such special values to the final value for logging. + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case slog.Value: + // This must come before fmt.Stringer because slog.Value implements + // fmt.Stringer, but does not produce the output that we want. + b.WriteByte('=') + generateJSON(b, v) + case fmt.Stringer: + writeStringValue(b, StringerToString(v)) + case string: + writeStringValue(b, v) + case error: + writeStringValue(b, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. 
+ switch value := value.(type) { + case string: + writeStringValue(b, value) + default: + f.formatAny(b, value) + } + case slog.LogValuer: + value := slog.AnyValue(v).Resolve() + if value.Kind() == slog.KindString { + writeStringValue(b, value.String()) + } else { + b.WriteByte('=') + generateJSON(b, value) + } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + f.formatAny(b, v) + } +} + +// generateJSON has the same preference for plain strings as KVFormat. +// In contrast to KVFormat it always produces valid JSON with no line breaks. +func generateJSON(b *bytes.Buffer, v interface{}) { + switch v := v.(type) { + case slog.Value: + switch v.Kind() { + case slog.KindGroup: + // Format as a JSON group. We must not involve f.AnyToStringHook (if there is any), + // because there is no guarantee that it produces valid JSON. + b.WriteByte('{') + for i, attr := range v.Group() { + if i > 0 { + b.WriteByte(',') + } + b.WriteString(strconv.Quote(attr.Key)) + b.WriteByte(':') + generateJSON(b, attr.Value) + } + b.WriteByte('}') + case slog.KindLogValuer: + generateJSON(b, v.Resolve()) + default: + // Peel off the slog.Value wrapper and format the actual value. 
+ generateJSON(b, v.Any()) + } + case fmt.Stringer: + b.WriteString(strconv.Quote(StringerToString(v))) + case logr.Marshaler: + generateJSON(b, MarshalerToValue(v)) + case slog.LogValuer: + generateJSON(b, slog.AnyValue(v).Resolve().Any()) + case string: + b.WriteString(strconv.Quote(v)) + case error: + b.WriteString(strconv.Quote(v.Error())) + default: + formatAsJSON(b, v) + } +} diff --git a/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go new file mode 100644 index 0000000000..21f1697d09 --- /dev/null +++ b/vendor/k8s.io/klog/v2/internal/sloghandler/sloghandler_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sloghandler + +import ( + "context" + "log/slog" + "runtime" + "strings" + "time" + + "k8s.io/klog/v2/internal/severity" +) + +func Handle(_ context.Context, record slog.Record, groups string, printWithInfos func(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{})) error { + now := record.Time + if now.IsZero() { + // This format doesn't support printing entries without a time. + now = time.Now() + } + + // slog has numeric severity levels, with 0 as default "info", negative for debugging, and + // positive with some pre-defined levels for more important. 
Those ranges get mapped to + // the corresponding klog levels where possible, with "info" the default that is used + // also for negative debug levels. + level := record.Level + s := severity.InfoLog + switch { + case level >= slog.LevelError: + s = severity.ErrorLog + case level >= slog.LevelWarn: + s = severity.WarningLog + } + + var file string + var line int + if record.PC != 0 { + // Same as https://cs.opensource.google/go/x/exp/+/642cacee:slog/record.go;drc=642cacee5cc05231f45555a333d07f1005ffc287;l=70 + fs := runtime.CallersFrames([]uintptr{record.PC}) + f, _ := fs.Next() + if f.File != "" { + file = f.File + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + line = f.Line + } + } else { + file = "???" + line = 1 + } + + kvList := make([]interface{}, 0, 2*record.NumAttrs()) + record.Attrs(func(attr slog.Attr) bool { + kvList = appendAttr(groups, kvList, attr) + return true + }) + + printWithInfos(file, line, now, nil, s, record.Message, kvList) + return nil +} + +func Attrs2KVList(groups string, attrs []slog.Attr) []interface{} { + kvList := make([]interface{}, 0, 2*len(attrs)) + for _, attr := range attrs { + kvList = appendAttr(groups, kvList, attr) + } + return kvList +} + +func appendAttr(groups string, kvList []interface{}, attr slog.Attr) []interface{} { + var key string + if groups != "" { + key = groups + "." + attr.Key + } else { + key = attr.Key + } + return append(kvList, key, attr.Value) +} diff --git a/vendor/k8s.io/klog/v2/k8s_references_slog.go b/vendor/k8s.io/klog/v2/k8s_references_slog.go new file mode 100644 index 0000000000..5522c84c77 --- /dev/null +++ b/vendor/k8s.io/klog/v2/k8s_references_slog.go @@ -0,0 +1,39 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package klog + +import ( + "log/slog" +) + +func (ref ObjectRef) LogValue() slog.Value { + if ref.Namespace != "" { + return slog.GroupValue(slog.String("name", ref.Name), slog.String("namespace", ref.Namespace)) + } + return slog.GroupValue(slog.String("name", ref.Name)) +} + +var _ slog.LogValuer = ObjectRef{} + +func (ks kobjSlice) LogValue() slog.Value { + return slog.AnyValue(ks.MarshalLog()) +} + +var _ slog.LogValuer = kobjSlice{} diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go index 152f8a6bd6..72502db3ae 100644 --- a/vendor/k8s.io/klog/v2/klog.go +++ b/vendor/k8s.io/klog/v2/klog.go @@ -415,7 +415,7 @@ func init() { logging.stderrThreshold = severityValue{ Severity: severity.ErrorLog, // Default stderrThreshold is ERROR. } - commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)") + commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=true)") commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") @@ -518,9 +518,7 @@ type settings struct { func (s settings) deepCopy() settings { // vmodule is a slice and would be shared, so we have copy it. 
filter := make([]modulePat, len(s.vmodule.filter)) - for i := range s.vmodule.filter { - filter[i] = s.vmodule.filter[i] - } + copy(filter, s.vmodule.filter) s.vmodule.filter = filter if s.logger != nil { @@ -657,16 +655,15 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin } } } - return l.formatHeader(s, file, line), file, line + return l.formatHeader(s, file, line, timeNow()), file, line } // formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { +func (l *loggingT) formatHeader(s severity.Severity, file string, line int, now time.Time) *buffer.Buffer { buf := buffer.GetBuffer() if l.skipHeaders { return buf } - now := timeNow() buf.FormatHeader(s, file, line, now) return buf } @@ -676,6 +673,10 @@ func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFil } func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprintln(args...) // cause vet to treat this function like fmt.Println + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -696,7 +697,15 @@ func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilte } func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { + if false { + _ = fmt.Sprint(args...) // // cause vet to treat this function like fmt.Print + } + buf, file, line := l.header(s, depth) + l.printWithInfos(buf, file, line, s, logger, filter, depth+1, args...) 
+} + +func (l *loggingT) printWithInfos(buf *buffer.Buffer, file string, line int, s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -719,6 +728,10 @@ func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilt } func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { + if false { + _ = fmt.Sprintf(format, args...) // cause vet to treat this function like fmt.Printf + } + buf, file, line := l.header(s, depth) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing @@ -741,7 +754,7 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter Lo // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) + buf := l.formatHeader(s, file, line, timeNow()) // If a logger is set and doesn't support writing a formatted buffer, // we clear the generated header as we rely on the backing // logger implementation to print headers. @@ -759,7 +772,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, fil l.output(s, logger, buf, 2 /* depth */, file, line, alsoToStderr) } -// if loggr is specified, will call loggr.Error, otherwise output with logging module. +// if logger is specified, will call logger.Error, otherwise output with logging module. 
func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -771,7 +784,7 @@ func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth l.printS(err, severity.ErrorLog, depth+1, msg, keysAndValues...) } -// if loggr is specified, will call loggr.Info, otherwise output with logging module. +// if logger is specified, will call logger.Info, otherwise output with logging module. func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) @@ -783,7 +796,7 @@ func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg str l.printS(nil, severity.InfoLog, depth+1, msg, keysAndValues...) } -// printS is called from infoS and errorS if loggr is not specified. +// printS is called from infoS and errorS if logger is not specified. // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. @@ -796,7 +809,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, "err", err) } serialize.KVListFormat(&b.Buffer, keysAndValues...) - l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) + l.printDepth(s, nil, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. 
buffer.PutBuffer(b) } @@ -873,6 +886,9 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu if logger.writeKlogBuffer != nil { logger.writeKlogBuffer(data) } else { + if len(data) > 0 && data[len(data)-1] == '\n' { + data = data[:len(data)-1] + } // TODO: set 'severity' and caller information as structured log info // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} if s == severity.ErrorLog { @@ -897,7 +913,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu l.exit(err) } } - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } else { if l.file[s] == nil { if err := l.createFiles(s); err != nil { @@ -907,20 +923,20 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu } if l.oneOutput { - l.file[s].Write(data) + _, _ = l.file[s].Write(data) } else { switch s { case severity.FatalLog: - l.file[severity.FatalLog].Write(data) + _, _ = l.file[severity.FatalLog].Write(data) fallthrough case severity.ErrorLog: - l.file[severity.ErrorLog].Write(data) + _, _ = l.file[severity.ErrorLog].Write(data) fallthrough case severity.WarningLog: - l.file[severity.WarningLog].Write(data) + _, _ = l.file[severity.WarningLog].Write(data) fallthrough case severity.InfoLog: - l.file[severity.InfoLog].Write(data) + _, _ = l.file[severity.InfoLog].Write(data) } } } @@ -946,7 +962,7 @@ func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Bu logExitFunc = func(error) {} // If we get a write error, we'll still exit below. for log := severity.FatalLog; log >= severity.InfoLog; log-- { if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) + _, _ = f.Write(trace) } } l.mu.Unlock() @@ -1102,7 +1118,7 @@ const flushInterval = 5 * time.Second // flushDaemon periodically flushes the log file buffers. 
type flushDaemon struct { mu sync.Mutex - clock clock.WithTicker + clock clock.Clock flush func() stopC chan struct{} stopDone chan struct{} @@ -1110,7 +1126,7 @@ type flushDaemon struct { // newFlushDaemon returns a new flushDaemon. If the passed clock is nil, a // clock.RealClock is used. -func newFlushDaemon(flush func(), tickClock clock.WithTicker) *flushDaemon { +func newFlushDaemon(flush func(), tickClock clock.Clock) *flushDaemon { if tickClock == nil { tickClock = clock.RealClock{} } @@ -1201,8 +1217,8 @@ func (l *loggingT) flushAll() { for s := severity.FatalLog; s >= severity.InfoLog; s-- { file := l.file[s] if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error + _ = file.Flush() // ignore error + _ = file.Sync() // ignore error } } if logging.loggerOptions.flush != nil { @@ -1281,9 +1297,7 @@ func (l *loggingT) setV(pc uintptr) Level { fn := runtime.FuncForPC(pc) file, _ := fn.FileLine(pc) // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } + file = strings.TrimSuffix(file, ".go") if slash := strings.LastIndex(file, "/"); slash >= 0 { file = file[slash+1:] } diff --git a/vendor/k8s.io/klog/v2/klog_file.go b/vendor/k8s.io/klog/v2/klog_file.go index 1025d644f3..8bee16204d 100644 --- a/vendor/k8s.io/klog/v2/klog_file.go +++ b/vendor/k8s.io/klog/v2/klog_file.go @@ -109,8 +109,8 @@ func create(tag string, t time.Time, startup bool) (f *os.File, filename string, f, err := openOrCreate(fname, startup) if err == nil { symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err + _ = os.Remove(symlink) // ignore err + _ = os.Symlink(name, symlink) // ignore err return f, fname, nil } lastErr = err diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 15de00e21f..efec96fd45 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -22,6 +22,11 @@ import ( 
"k8s.io/klog/v2/internal/serialize" ) +const ( + // nameKey is used to log the `WithName` values as an additional attribute. + nameKey = "logger" +) + // NewKlogr returns a logger that is functionally identical to // klogr.NewWithOptions(klogr.FormatKlog), i.e. it passes through to klog. The // difference is that it uses a simpler implementation. @@ -32,10 +37,15 @@ func NewKlogr() Logger { // klogger is a subset of klogr/klogr.go. It had to be copied to break an // import cycle (klogr wants to use klog, and klog wants to use klogr). type klogger struct { - level int callDepth int - prefix string - values []interface{} + + // hasPrefix is true if the first entry in values is the special + // nameKey key/value. Such an entry gets added and later updated in + // WithName. + hasPrefix bool + + values []interface{} + groups string } func (l *klogger) Init(info logr.RuntimeInfo) { @@ -44,34 +54,40 @@ func (l *klogger) Init(info logr.RuntimeInfo) { func (l *klogger) Info(level int, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) - if l.prefix != "" { - msg = l.prefix + ": " + msg - } // Skip this function. VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } func (l *klogger) Enabled(level int) bool { - // Skip this function and logr.Logger.Info where Enabled is called. - return VDepth(l.callDepth+2, Level(level)).Enabled() + return VDepth(l.callDepth+1, Level(level)).Enabled() } func (l *klogger) Error(err error, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) - if l.prefix != "" { - msg = l.prefix + ": " + msg - } ErrorSDepth(l.callDepth+1, err, msg, merged...) } // WithName returns a new logr.Logger with the specified name appended. klogr -// uses '/' characters to separate name elements. Callers should not pass '/' +// uses '.' characters to separate name elements. Callers should not pass '.' 
// in the provided name string, but this library does not actually enforce that. func (l klogger) WithName(name string) logr.LogSink { - if len(l.prefix) > 0 { - l.prefix = l.prefix + "/" + if l.hasPrefix { + // Copy slice and modify value. No length checks and type + // assertions are needed because hasPrefix is only true if the + // first two elements exist and are key/value strings. + v := make([]interface{}, 0, len(l.values)) + v = append(v, l.values...) + prefix, _ := v[1].(string) + v[1] = prefix + "." + name + l.values = v + } else { + // Preprend new key/value pair. + v := make([]interface{}, 0, 2+len(l.values)) + v = append(v, nameKey, name) + v = append(v, l.values...) + l.values = v + l.hasPrefix = true } - l.prefix += name return &l } diff --git a/vendor/k8s.io/klog/v2/klogr_slog.go b/vendor/k8s.io/klog/v2/klogr_slog.go new file mode 100644 index 0000000000..f7bf740306 --- /dev/null +++ b/vendor/k8s.io/klog/v2/klogr_slog.go @@ -0,0 +1,96 @@ +//go:build go1.21 +// +build go1.21 + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package klog + +import ( + "context" + "log/slog" + "strconv" + "time" + + "github.com/go-logr/logr/slogr" + + "k8s.io/klog/v2/internal/buffer" + "k8s.io/klog/v2/internal/serialize" + "k8s.io/klog/v2/internal/severity" + "k8s.io/klog/v2/internal/sloghandler" +) + +func (l *klogger) Handle(ctx context.Context, record slog.Record) error { + if logging.logger != nil { + if slogSink, ok := logging.logger.GetSink().(slogr.SlogSink); ok { + // Let that logger do the work. + return slogSink.Handle(ctx, record) + } + } + + return sloghandler.Handle(ctx, record, l.groups, slogOutput) +} + +// slogOutput corresponds to several different functions in klog.go. +// It goes through some of the same checks and formatting steps before +// it ultimately converges by calling logging.printWithInfos. +func slogOutput(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) { + // See infoS. + if logging.logger != nil { + // Taking this path happens when klog has a logger installed + // as backend which doesn't support slog. Not good, we have to + // guess about the call depth and drop the actual location. + logger := logging.logger.WithCallDepth(2) + if s > severity.ErrorLog { + logger.Error(err, msg, kvList...) + } else { + logger.Info(msg, kvList...) + } + return + } + + // See printS. + b := buffer.GetBuffer() + b.WriteString(strconv.Quote(msg)) + if err != nil { + serialize.KVListFormat(&b.Buffer, "err", err) + } + serialize.KVListFormat(&b.Buffer, kvList...) + + // See print + header. 
+ buf := logging.formatHeader(s, file, line, now) + logging.printWithInfos(buf, file, line, s, nil, nil, 0, &b.Buffer) + + buffer.PutBuffer(b) +} + +func (l *klogger) WithAttrs(attrs []slog.Attr) slogr.SlogSink { + clone := *l + clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs)) + return &clone +} + +func (l *klogger) WithGroup(name string) slogr.SlogSink { + clone := *l + if clone.groups != "" { + clone.groups += "." + name + } else { + clone.groups = name + } + return &clone +} + +var _ slogr.SlogSink = &klogger{} diff --git a/vendor/modules.txt b/vendor/modules.txt index b7386df9dd..361a46d320 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -701,6 +701,7 @@ github.com/go-logfmt/logfmt ## explicit; go 1.18 github.com/go-logr/logr github.com/go-logr/logr/funcr +github.com/go-logr/logr/slogr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr @@ -1729,7 +1730,7 @@ github.com/t-yuki/gocover-cobertura # github.com/tdakkota/asciicheck v0.2.0 ## explicit; go 1.18 github.com/tdakkota/asciicheck -# github.com/tektoncd/pipeline v0.55.0 +# github.com/tektoncd/pipeline v0.55.1-0.20240105143253-fe47c9bc893a ## explicit; go 1.19 github.com/tektoncd/pipeline/pkg/apis/config github.com/tektoncd/pipeline/pkg/apis/config/resolver @@ -1782,12 +1783,8 @@ github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clu github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/fake github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/customrun/fake -github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun -github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/fake -github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun 
-github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake -github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/pipelinerun -github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1beta1/taskrun +github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/pipelinerun +github.com/tektoncd/pipeline/pkg/client/injection/reconciler/pipeline/v1/taskrun github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1 github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1 github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1 @@ -1934,7 +1931,7 @@ gitlab.com/bosi/decorder # go-simpler.org/sloglint v0.1.2 ## explicit; go 1.20 go-simpler.org/sloglint -# go.mongodb.org/mongo-driver v1.13.0 +# go.mongodb.org/mongo-driver v1.13.1 ## explicit; go 1.13 go.mongodb.org/mongo-driver/bson go.mongodb.org/mongo-driver/bson/bsoncodec @@ -2619,8 +2616,8 @@ k8s.io/api/storage/v1beta1 ## explicit; go 1.19 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 -# k8s.io/apimachinery v0.28.3 -## explicit; go 1.20 +# k8s.io/apimachinery v0.29.0 +## explicit; go 1.21 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta @@ -2992,7 +2989,7 @@ k8s.io/gengo/generator k8s.io/gengo/namer k8s.io/gengo/parser k8s.io/gengo/types -# k8s.io/klog/v2 v2.100.1 +# k8s.io/klog/v2 v2.110.1 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -3000,6 +2997,7 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity +k8s.io/klog/v2/internal/sloghandler # k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 ## explicit; go 1.19 k8s.io/kube-openapi/pkg/cached @@ -3113,7 +3111,7 @@ sigs.k8s.io/json/internal/golang/encoding/json # sigs.k8s.io/release-utils v0.7.7 ## explicit; go 1.20 sigs.k8s.io/release-utils/version -# 
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go index 9b14ca581b..41fc2474a4 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/pathelementmap.go @@ -28,20 +28,15 @@ import ( // for PathElementSet and SetNodeMap, so we could probably share the // code. type PathElementValueMap struct { - members sortedPathElementValues + valueMap PathElementMap } func MakePathElementValueMap(size int) PathElementValueMap { return PathElementValueMap{ - members: make(sortedPathElementValues, 0, size), + valueMap: MakePathElementMap(size), } } -type pathElementValue struct { - PathElement PathElement - Value value.Value -} - type sortedPathElementValues []pathElementValue // Implement the sort interface; this would permit bulk creation, which would @@ -53,7 +48,40 @@ func (spev sortedPathElementValues) Less(i, j int) bool { func (spev sortedPathElementValues) Swap(i, j int) { spev[i], spev[j] = spev[j], spev[i] } // Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { + s.valueMap.Insert(pe, v) +} + +// Get retrieves the value associated with the given PathElement from the map. +// (nil, false) is returned if there is no such PathElement. +func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { + v, ok := s.valueMap.Get(pe) + if !ok { + return nil, false + } + return v.(value.Value), true +} + +// PathElementValueMap is a map from PathElement to interface{}. 
+type PathElementMap struct { + members sortedPathElementValues +} + +type pathElementValue struct { + PathElement PathElement + Value interface{} +} + +func MakePathElementMap(size int) PathElementMap { + return PathElementMap{ + members: make(sortedPathElementValues, 0, size), + } +} + +// Insert adds the pathelement and associated value in the map. +// If insert is called twice with the same PathElement, the value is replaced. +func (s *PathElementMap) Insert(pe PathElement, v interface{}) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) @@ -62,6 +90,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { return } if s.members[loc].PathElement.Equals(pe) { + s.members[loc].Value = v return } s.members = append(s.members, pathElementValue{}) @@ -71,7 +100,7 @@ func (s *PathElementValueMap) Insert(pe PathElement, v value.Value) { // Get retrieves the value associated with the given PathElement from the map. // (nil, false) is returned if there is no such PathElement. -func (s *PathElementValueMap) Get(pe PathElement) (value.Value, bool) { +func (s *PathElementMap) Get(pe PathElement) (interface{}, bool) { loc := sort.Search(len(s.members), func(i int) bool { return !s.members[i].PathElement.Less(pe) }) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go index e1540841d8..d5a977d607 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -34,8 +34,6 @@ type UpdaterBuilder struct { Converter Converter IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set - EnableUnions bool - // Stop comparing the new object with old object after applying. 
// This was initially used to avoid spurious etcd update, but // since that's vastly inefficient, we've come-up with a better @@ -49,7 +47,6 @@ func (u *UpdaterBuilder) BuildUpdater() *Updater { return &Updater{ Converter: u.Converter, IgnoredFields: u.IgnoredFields, - enableUnions: u.EnableUnions, returnInputOnNoop: u.ReturnInputOnNoop, } } @@ -63,19 +60,9 @@ type Updater struct { // Deprecated: This will eventually become private. IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set - enableUnions bool - returnInputOnNoop bool } -// EnableUnionFeature turns on union handling. It is disabled by default until the -// feature is complete. -// -// Deprecated: Use the builder instead. -func (s *Updater) EnableUnionFeature() { - s.enableUnions = true -} - func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpath.APIVersion, managers fieldpath.ManagedFields, workflow string, force bool) (fieldpath.ManagedFields, *typed.Comparison, error) { conflicts := fieldpath.ManagedFields{} removed := fieldpath.ManagedFields{} @@ -160,12 +147,6 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - newObject, err = liveObject.NormalizeUnions(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } managers, compare, err := s.update(liveObject, newObject, version, managers, manager, true) if err != nil { return nil, fieldpath.ManagedFields{}, err @@ -179,7 +160,7 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp ignored = fieldpath.NewSet() } managers[manager] = fieldpath.NewVersionedSet( - managers[manager].Set().Union(compare.Modified).Union(compare.Added).Difference(compare.Removed).RecursiveDifference(ignored), + managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), version, false, ) @@ -198,22 +179,10 @@ func (s 
*Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel if err != nil { return nil, fieldpath.ManagedFields{}, err } - if s.enableUnions { - configObject, err = configObject.NormalizeUnionsApply(configObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } newObject, err := liveObject.Merge(configObject) if err != nil { return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to merge config: %v", err) } - if s.enableUnions { - newObject, err = configObject.NormalizeUnionsApply(newObject) - if err != nil { - return nil, fieldpath.ManagedFields{}, err - } - } lastSet := managers[manager] set, err := configObject.ToFieldSet() if err != nil { diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go index e4c5caa2aa..6eb6c36df3 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/schema/schemaschema.go @@ -145,6 +145,7 @@ var SchemaSchemaYAML = `types: list: elementType: scalar: string + elementRelationship: atomic - name: untyped map: fields: diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go new file mode 100644 index 0000000000..ed483cbbc4 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -0,0 +1,460 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typed + +import ( + "fmt" + "strings" + + "sigs.k8s.io/structured-merge-diff/v4/fieldpath" + "sigs.k8s.io/structured-merge-diff/v4/schema" + "sigs.k8s.io/structured-merge-diff/v4/value" +) + +// Comparison is the return value of a TypedValue.Compare() operation. +// +// No field will appear in more than one of the three fieldsets. If all of the +// fieldsets are empty, then the objects must have been equal. +type Comparison struct { + // Removed contains any fields removed by rhs (the right-hand-side + // object in the comparison). + Removed *fieldpath.Set + // Modified contains fields present in both objects but different. + Modified *fieldpath.Set + // Added contains any fields added by rhs. + Added *fieldpath.Set +} + +// IsSame returns true if the comparison returned no changes (the two +// compared objects are similar). +func (c *Comparison) IsSame() bool { + return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() +} + +// String returns a human readable version of the comparison. 
+func (c *Comparison) String() string { + bld := strings.Builder{} + if !c.Modified.Empty() { + bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) + } + if !c.Added.Empty() { + bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) + } + if !c.Removed.Empty() { + bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) + } + return bld.String() +} + +// ExcludeFields fields from the compare recursively removes the fields +// from the entire comparison +func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { + if fields == nil || fields.Empty() { + return c + } + c.Removed = c.Removed.RecursiveDifference(fields) + c.Modified = c.Modified.RecursiveDifference(fields) + c.Added = c.Added.RecursiveDifference(fields) + return c +} + +type compareWalker struct { + lhs value.Value + rhs value.Value + schema *schema.Schema + typeRef schema.TypeRef + + // Current path that we are comparing + path fieldpath.Path + + // Resulting comparison. + comparison *Comparison + + // internal housekeeping--don't set when constructing. + inLeaf bool // Set to true if we're in a "big leaf"--atomic map/list + + // Allocate only as many walkers as needed for the depth by storing them here. + spareWalkers *[]*compareWalker + + allocator value.Allocator +} + +// compare compares stuff. +func (w *compareWalker) compare(prefixFn func() string) (errs ValidationErrors) { + if w.lhs == nil && w.rhs == nil { + // check this condidition here instead of everywhere below. + return errorf("at least one of lhs and rhs must be provided") + } + a, ok := w.schema.Resolve(w.typeRef) + if !ok { + return errorf("schema error: no type found matching: %v", *w.typeRef.NamedType) + } + + alhs := deduceAtom(a, w.lhs) + arhs := deduceAtom(a, w.rhs) + + // deduceAtom does not fix the type for nil values + // nil is a wildcard and will accept whatever form the other operand takes + if w.rhs == nil { + errs = append(errs, handleAtom(alhs, w.typeRef, w)...) 
+ } else if w.lhs == nil || alhs.Equals(&arhs) { + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } else { + w2 := *w + errs = append(errs, handleAtom(alhs, w.typeRef, &w2)...) + errs = append(errs, handleAtom(arhs, w.typeRef, w)...) + } + + if !w.inLeaf { + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } + } + return errs.WithLazyPrefix(prefixFn) +} + +// doLeaf should be called on leaves before descending into children, if there +// will be a descent. It modifies w.inLeaf. +func (w *compareWalker) doLeaf() { + if w.inLeaf { + // We're in a "big leaf", an atomic map or list. Ignore + // subsequent leaves. + return + } + w.inLeaf = true + + // We don't recurse into leaf fields for merging. + if w.lhs == nil { + w.comparison.Added.Insert(w.path) + } else if w.rhs == nil { + w.comparison.Removed.Insert(w.path) + } else if !value.EqualsUsing(w.allocator, w.rhs, w.lhs) { + // TODO: Equality is not sufficient for this. + // Need to implement equality check on the value type. + w.comparison.Modified.Insert(w.path) + } +} + +func (w *compareWalker) doScalar(t *schema.Scalar) ValidationErrors { + // Make sure at least one side is a valid scalar. + lerrs := validateScalar(t, w.lhs, "lhs: ") + rerrs := validateScalar(t, w.rhs, "rhs: ") + if len(lerrs) > 0 && len(rerrs) > 0 { + return append(lerrs, rerrs...) + } + + // All scalars are leaf fields. + w.doLeaf() + + return nil +} + +func (w *compareWalker) prepareDescent(pe fieldpath.PathElement, tr schema.TypeRef, cmp *Comparison) *compareWalker { + if w.spareWalkers == nil { + // first descent. 
+ w.spareWalkers = &[]*compareWalker{} + } + var w2 *compareWalker + if n := len(*w.spareWalkers); n > 0 { + w2, *w.spareWalkers = (*w.spareWalkers)[n-1], (*w.spareWalkers)[:n-1] + } else { + w2 = &compareWalker{} + } + *w2 = *w + w2.typeRef = tr + w2.path = append(w2.path, pe) + w2.lhs = nil + w2.rhs = nil + w2.comparison = cmp + return w2 +} + +func (w *compareWalker) finishDescent(w2 *compareWalker) { + // if the descent caused a realloc, ensure that we reuse the buffer + // for the next sibling. + w.path = w2.path[:len(w2.path)-1] + *w.spareWalkers = append(*w.spareWalkers, w2) +} + +func (w *compareWalker) derefMap(prefix string, v value.Value) (value.Map, ValidationErrors) { + if v == nil { + return nil, nil + } + m, err := mapValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return m, nil +} + +func (w *compareWalker) visitListItems(t *schema.List, lhs, rhs value.List) (errs ValidationErrors) { + rLen := 0 + if rhs != nil { + rLen = rhs.Length() + } + lLen := 0 + if lhs != nil { + lLen = lhs.Length() + } + + maxLength := rLen + if lLen > maxLength { + maxLength = lLen + } + // Contains all the unique PEs between lhs and rhs, exactly once. + // Order doesn't matter since we're just tracking ownership in a set. + allPEs := make([]fieldpath.PathElement, 0, maxLength) + + // Gather all the elements from lhs, indexed by PE, in a list for duplicates. + lValues := fieldpath.MakePathElementMap(lLen) + for i := 0; i < lLen; i++ { + child := lhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. 
+ continue + } + + if v, found := lValues.Get(pe); found { + list := v.([]value.Value) + lValues.Insert(pe, append(list, child)) + } else { + lValues.Insert(pe, []value.Value{child}) + allPEs = append(allPEs, pe) + } + } + + // Gather all the elements from rhs, indexed by PE, in a list for duplicates. + rValues := fieldpath.MakePathElementMap(rLen) + for i := 0; i < rLen; i++ { + rValue := rhs.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, rValue) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + if v, found := rValues.Get(pe); found { + list := v.([]value.Value) + rValues.Insert(pe, append(list, rValue)) + } else { + rValues.Insert(pe, []value.Value{rValue}) + if _, found := lValues.Get(pe); !found { + allPEs = append(allPEs, pe) + } + } + } + + for _, pe := range allPEs { + lList := []value.Value(nil) + if l, ok := lValues.Get(pe); ok { + lList = l.([]value.Value) + } + rList := []value.Value(nil) + if l, ok := rValues.Get(pe); ok { + rList = l.([]value.Value) + } + + switch { + case len(lList) == 0 && len(rList) == 0: + // We shouldn't be here anyway. + return + // Normal use-case: + // We have no duplicates for this PE, compare items one-to-one. + case len(lList) <= 1 && len(rList) <= 1: + lValue := value.Value(nil) + if len(lList) != 0 { + lValue = lList[0] + } + rValue := value.Value(nil) + if len(rList) != 0 { + rValue = rList[0] + } + errs = append(errs, w.compareListItem(t, pe, lValue, rValue)...) + // Duplicates before & after use-case: + // Compare the duplicates lists as if they were atomic, mark modified if they changed. 
+ case len(lList) >= 2 && len(rList) >= 2: + listEqual := func(lList, rList []value.Value) bool { + if len(lList) != len(rList) { + return false + } + for i := range lList { + if !value.Equals(lList[i], rList[i]) { + return false + } + } + return true + } + if !listEqual(lList, rList) { + w.comparison.Modified.Insert(append(w.path, pe)) + } + // Duplicates before & not anymore use-case: + // Rcursively add new non-duplicate items, Remove duplicate marker, + case len(lList) >= 2: + if len(rList) != 0 { + errs = append(errs, w.compareListItem(t, pe, nil, rList[0])...) + } + w.comparison.Removed.Insert(append(w.path, pe)) + // New duplicates use-case: + // Recursively remove old non-duplicate items, add duplicate marker. + case len(rList) >= 2: + if len(lList) != 0 { + errs = append(errs, w.compareListItem(t, pe, lList[0], nil)...) + } + w.comparison.Added.Insert(append(w.path, pe)) + } + } + + return +} + +func (w *compareWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { + var errs ValidationErrors + length := 0 + if list != nil { + length = list.Length() + } + observed := fieldpath.MakePathElementValueMap(length) + pes := make([]fieldpath.PathElement, 0, length) + for i := 0; i < length; i++ { + child := list.At(i) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) + if err != nil { + errs = append(errs, errorf("element %v: %v", i, err.Error())...) + // If we can't construct the path element, we can't + // even report errors deeper in the schema, so bail on + // this element. + continue + } + // Ignore repeated occurences of `pe`. 
+ if _, found := observed.Get(pe); found { + continue + } + observed.Insert(pe, child) + pes = append(pes, pe) + } + return pes, observed, errs +} + +func (w *compareWalker) compareListItem(t *schema.List, pe fieldpath.PathElement, lChild, rChild value.Value) ValidationErrors { + w2 := w.prepareDescent(pe, t.ElementType, w.comparison) + w2.lhs = lChild + w2.rhs = rChild + errs := w2.compare(pe.String) + w.finishDescent(w2) + return errs +} + +func (w *compareWalker) derefList(prefix string, v value.Value) (value.List, ValidationErrors) { + if v == nil { + return nil, nil + } + l, err := listValue(w.allocator, v) + if err != nil { + return nil, errorf("%v: %v", prefix, err) + } + return l, nil +} + +func (w *compareWalker) doList(t *schema.List) (errs ValidationErrors) { + lhs, _ := w.derefList("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefList("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Length() == 0) && (rhs == nil || rhs.Length() == 0) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = w.visitListItems(t, lhs, rhs) + + return errs +} + +func (w *compareWalker) visitMapItem(t *schema.Map, out map[string]interface{}, key string, lhs, rhs value.Value) (errs ValidationErrors) { + fieldType := t.ElementType + if sf, ok := t.FindField(key); ok { + fieldType = sf.Type + } + pe := fieldpath.PathElement{FieldName: &key} + w2 := w.prepareDescent(pe, fieldType, w.comparison) + w2.lhs = lhs + w2.rhs = rhs + errs = append(errs, w2.compare(pe.String)...) 
+ w.finishDescent(w2) + return errs +} + +func (w *compareWalker) visitMapItems(t *schema.Map, lhs, rhs value.Map) (errs ValidationErrors) { + out := map[string]interface{}{} + + value.MapZipUsing(w.allocator, lhs, rhs, value.Unordered, func(key string, lhsValue, rhsValue value.Value) bool { + errs = append(errs, w.visitMapItem(t, out, key, lhsValue, rhsValue)...) + return true + }) + + return errs +} + +func (w *compareWalker) doMap(t *schema.Map) (errs ValidationErrors) { + lhs, _ := w.derefMap("lhs: ", w.lhs) + if lhs != nil { + defer w.allocator.Free(lhs) + } + rhs, _ := w.derefMap("rhs: ", w.rhs) + if rhs != nil { + defer w.allocator.Free(rhs) + } + // If both lhs and rhs are empty/null, treat it as a + // leaf: this helps preserve the empty/null + // distinction. + emptyPromoteToLeaf := (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) + + if t.ElementRelationship == schema.Atomic || emptyPromoteToLeaf { + w.doLeaf() + return nil + } + + if lhs == nil && rhs == nil { + return nil + } + + errs = append(errs, w.visitMapItems(t, lhs, rhs)...) + + return errs +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go index 19c77334f6..78fdb0e75f 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/helpers.go @@ -197,7 +197,7 @@ func getAssociativeKeyDefault(s *schema.Schema, list *schema.List, fieldName str return field.Default, nil } -func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} if child.IsNull() { // null entries are illegal. 
@@ -225,7 +225,7 @@ func keyedAssociativeListItemToPathElement(a value.Allocator, s *schema.Schema, return pe, nil } -func setItemToPathElement(list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { +func setItemToPathElement(child value.Value) (fieldpath.PathElement, error) { pe := fieldpath.PathElement{} switch { case child.IsMap(): @@ -245,16 +245,15 @@ func setItemToPathElement(list *schema.List, index int, child value.Value) (fiel } } -func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, index int, child value.Value) (fieldpath.PathElement, error) { - if list.ElementRelationship == schema.Associative { - if len(list.Keys) > 0 { - return keyedAssociativeListItemToPathElement(a, s, list, index, child) - } +func listItemToPathElement(a value.Allocator, s *schema.Schema, list *schema.List, child value.Value) (fieldpath.PathElement, error) { + if list.ElementRelationship != schema.Associative { + return fieldpath.PathElement{}, errors.New("invalid indexing of non-associative list") + } - // If there's no keys, then we must be a set of primitives. - return setItemToPathElement(list, index, child) + if len(list.Keys) > 0 { + return keyedAssociativeListItemToPathElement(a, s, list, child) } - // Use the index as a key for atomic lists. - return fieldpath.PathElement{Index: &index}, nil + // If there's no keys, then we must be a set of primitives. 
+ return setItemToPathElement(child) } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go index 09209ec82a..fa227ac405 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/merge.go @@ -180,14 +180,18 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } out := make([]interface{}, 0, outLen) - rhsOrder, observedRHS, rhsErrs := w.indexListPathElements(t, rhs) + rhsPEs, observedRHS, rhsErrs := w.indexListPathElements(t, rhs, false) errs = append(errs, rhsErrs...) - lhsOrder, observedLHS, lhsErrs := w.indexListPathElements(t, lhs) + lhsPEs, observedLHS, lhsErrs := w.indexListPathElements(t, lhs, true) errs = append(errs, lhsErrs...) + if len(errs) != 0 { + return errs + } + sharedOrder := make([]*fieldpath.PathElement, 0, rLen) - for i := range rhsOrder { - pe := &rhsOrder[i] + for i := range rhsPEs { + pe := &rhsPEs[i] if _, ok := observedLHS.Get(*pe); ok { sharedOrder = append(sharedOrder, pe) } @@ -199,13 +203,15 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err sharedOrder = sharedOrder[1:] } - lLen, rLen = len(lhsOrder), len(rhsOrder) + mergedRHS := fieldpath.MakePathElementMap(len(rhsPEs)) + lLen, rLen = len(lhsPEs), len(rhsPEs) for lI, rI := 0, 0; lI < lLen || rI < rLen; { if lI < lLen && rI < rLen { - pe := lhsOrder[lI] - if pe.Equals(rhsOrder[rI]) { + pe := lhsPEs[lI] + if pe.Equals(rhsPEs[rI]) { // merge LHS & RHS items - lChild, _ := observedLHS.Get(pe) + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if the PE is duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) 
@@ -222,17 +228,17 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } continue } - if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsOrder[lI]) { + if _, ok := observedRHS.Get(pe); ok && nextShared != nil && !nextShared.Equals(lhsPEs[lI]) { // shared item, but not the one we want in this round lI++ continue } } if lI < lLen { - pe := lhsOrder[lI] + pe := lhsPEs[lI] if _, ok := observedRHS.Get(pe); !ok { - // take LHS item - lChild, _ := observedLHS.Get(pe) + // take LHS item using At to make sure we get the right item (observed may not contain the right item). + lChild := lhs.AtUsing(w.allocator, lI) mergeOut, errs := w.mergeListItem(t, pe, lChild, nil) errs = append(errs, errs...) if mergeOut != nil { @@ -240,12 +246,16 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err } lI++ continue + } else if _, ok := mergedRHS.Get(pe); ok { + // we've already merged it with RHS, we don't want to duplicate it, skip it. + lI++ } } if rI < rLen { // Take the RHS item, merge with matching LHS item if possible - pe := rhsOrder[rI] - lChild, _ := observedLHS.Get(pe) // may be nil + pe := rhsPEs[rI] + mergedRHS.Insert(pe, struct{}{}) + lChild, _ := observedLHS.Get(pe) // may be nil if absent or duplicaated. rChild, _ := observedRHS.Get(pe) mergeOut, errs := w.mergeListItem(t, pe, lChild, rChild) errs = append(errs, errs...) 
@@ -272,7 +282,7 @@ func (w *mergingWalker) visitListItems(t *schema.List, lhs, rhs value.List) (err return errs } -func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { +func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List, allowDuplicates bool) ([]fieldpath.PathElement, fieldpath.PathElementValueMap, ValidationErrors) { var errs ValidationErrors length := 0 if list != nil { @@ -282,7 +292,7 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( pes := make([]fieldpath.PathElement, 0, length) for i := 0; i < length; i++ { child := list.At(i) - pe, err := listItemToPathElement(w.allocator, w.schema, t, i, child) + pe, err := listItemToPathElement(w.allocator, w.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -290,11 +300,15 @@ func (w *mergingWalker) indexListPathElements(t *schema.List, list value.List) ( // this element. continue } - if _, found := observed.Get(pe); found { + if _, found := observed.Get(pe); found && !allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) continue + } else if !found { + observed.Insert(pe, child) + } else { + // Duplicated items are not merged with the new value, make them nil. 
+ observed.Insert(pe, value.NewValueInterface(nil)) } - observed.Insert(pe, child) pes = append(pes, pe) } return pes, observed, errs diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 3949a78fc6..4258ee5bab 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -93,13 +93,13 @@ func (p ParseableType) IsValid() bool { // FromYAML parses a yaml string into an object with the current schema // and the type "typename" or an error if validation fails. -func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { +func (p ParseableType) FromYAML(object YAMLObject, opts ...ValidationOptions) (*TypedValue, error) { var v interface{} err := yaml.Unmarshal([]byte(object), &v) if err != nil { return nil, err } - return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef) + return AsTyped(value.NewValueInterface(v), p.Schema, p.TypeRef, opts...) } // FromUnstructured converts a go "interface{}" type, typically an @@ -108,8 +108,8 @@ func (p ParseableType) FromYAML(object YAMLObject) (*TypedValue, error) { // The provided interface{} must be one of: map[string]interface{}, // map[interface{}]interface{}, []interface{}, int types, float types, // string or boolean. Nested interface{} must also be one of these types. -func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { - return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef) +func (p ParseableType) FromUnstructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { + return AsTyped(value.NewValueInterface(in), p.Schema, p.TypeRef, opts...) } // FromStructured converts a go "interface{}" type, typically an structured object in @@ -117,12 +117,12 @@ func (p ParseableType) FromUnstructured(in interface{}) (*TypedValue, error) { // schema validation. 
The provided "interface{}" value must be a pointer so that the // value can be modified via reflection. The provided "interface{}" may contain structs // and types that are converted to Values by the jsonMarshaler interface. -func (p ParseableType) FromStructured(in interface{}) (*TypedValue, error) { +func (p ParseableType) FromStructured(in interface{}, opts ...ValidationOptions) (*TypedValue, error) { v, err := value.NewValueReflect(in) if err != nil { return nil, fmt.Errorf("error creating struct value reflector: %v", err) } - return AsTyped(v, p.Schema, p.TypeRef) + return AsTyped(v, p.Schema, p.TypeRef, opts...) } // DeducedParseableType is a ParseableType that deduces the type from diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go index a338d761d4..ad071ee8f3 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/remove.go @@ -74,9 +74,9 @@ func (w *removingWalker) doList(t *schema.List) (errs ValidationErrors) { iter := l.RangeUsing(w.allocator) defer w.allocator.Free(iter) for iter.Next() { - i, item := iter.Item() + _, item := iter.Item() // Ignore error because we have already validated this list - pe, _ := listItemToPathElement(w.allocator, w.schema, t, i, item) + pe, _ := listItemToPathElement(w.allocator, w.schema, t, item) path, _ := fieldpath.MakePath(pe) // save items on the path when we shouldExtract // but ignore them when we are removing (i.e. 
!w.shouldExtract) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go index 047efff053..d563a87ee6 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/tofieldset.go @@ -94,9 +94,31 @@ func (v *toFieldSetWalker) doScalar(t *schema.Scalar) ValidationErrors { } func (v *toFieldSetWalker) visitListItems(t *schema.List, list value.List) (errs ValidationErrors) { + // Keeps track of the PEs we've seen + seen := fieldpath.MakePathElementSet(list.Length()) + // Keeps tracks of the PEs we've counted as duplicates + duplicates := fieldpath.MakePathElementSet(list.Length()) for i := 0; i < list.Length(); i++ { child := list.At(i) - pe, _ := listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if seen.Has(pe) { + if duplicates.Has(pe) { + // do nothing + } else { + v.set.Insert(append(v.path, pe)) + duplicates.Insert(pe) + } + } else { + seen.Insert(pe) + } + } + + for i := 0; i < list.Length(); i++ { + child := list.At(i) + pe, _ := listItemToPathElement(v.allocator, v.schema, t, child) + if duplicates.Has(pe) { + continue + } v2 := v.prepareDescent(pe, t.ElementType) v2.value = child errs = append(errs, v2.toFieldSet()...) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index 6411bd51a9..9be9028280 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -17,8 +17,6 @@ limitations under the License. package typed import ( - "fmt" - "strings" "sync" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" @@ -26,16 +24,24 @@ import ( "sigs.k8s.io/structured-merge-diff/v4/value" ) +// ValidationOptions is the list of all the options available when running the validation. 
+type ValidationOptions int + +const ( + // AllowDuplicates means that sets and associative lists can have duplicate similar items. + AllowDuplicates ValidationOptions = iota +) + // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have // type 'typeName' in the schema. An error is returned if the v doesn't conform // to the schema. -func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef) (*TypedValue, error) { +func AsTyped(v value.Value, s *schema.Schema, typeRef schema.TypeRef, opts ...ValidationOptions) (*TypedValue, error) { tv := &TypedValue{ value: v, typeRef: typeRef, schema: s, } - if err := tv.Validate(); err != nil { + if err := tv.Validate(opts...); err != nil { return nil, err } return tv, nil @@ -81,8 +87,14 @@ func (tv TypedValue) Schema() *schema.Schema { } // Validate returns an error with a list of every spec violation. -func (tv TypedValue) Validate() error { +func (tv TypedValue) Validate(opts ...ValidationOptions) error { w := tv.walker() + for _, opt := range opts { + switch opt { + case AllowDuplicates: + w.allowDuplicates = true + } + } defer w.finished() if errs := w.validate(nil); len(errs) != 0 { return errs @@ -117,6 +129,10 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { return merge(&tv, pso, ruleKeepRHS, nil) } +var cmpwPool = sync.Pool{ + New: func() interface{} { return &compareWalker{} }, +} + // Compare compares the two objects. See the comments on the `Comparison` // struct for details on the return value. // @@ -124,34 +140,44 @@ func (tv TypedValue) Merge(pso *TypedValue) (*TypedValue, error) { // match), or an error will be returned. Validation errors will be returned if // the objects don't conform to the schema. 
func (tv TypedValue) Compare(rhs *TypedValue) (c *Comparison, err error) { - c = &Comparison{ + lhs := tv + if lhs.schema != rhs.schema { + return nil, errorf("expected objects with types from the same schema") + } + if !lhs.typeRef.Equals(&rhs.typeRef) { + return nil, errorf("expected objects of the same type, but got %v and %v", lhs.typeRef, rhs.typeRef) + } + + cmpw := cmpwPool.Get().(*compareWalker) + defer func() { + cmpw.lhs = nil + cmpw.rhs = nil + cmpw.schema = nil + cmpw.typeRef = schema.TypeRef{} + cmpw.comparison = nil + cmpw.inLeaf = false + + cmpwPool.Put(cmpw) + }() + + cmpw.lhs = lhs.value + cmpw.rhs = rhs.value + cmpw.schema = lhs.schema + cmpw.typeRef = lhs.typeRef + cmpw.comparison = &Comparison{ Removed: fieldpath.NewSet(), Modified: fieldpath.NewSet(), Added: fieldpath.NewSet(), } - a := value.NewFreelistAllocator() - _, err = merge(&tv, rhs, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } else if !value.EqualsUsing(a, w.rhs, w.lhs) { - // TODO: Equality is not sufficient for this. - // Need to implement equality check on the value type. - c.Modified.Insert(w.path) - } - }, func(w *mergingWalker) { - if w.lhs == nil { - c.Added.Insert(w.path) - } else if w.rhs == nil { - c.Removed.Insert(w.path) - } - }) - if err != nil { - return nil, err + if cmpw.allocator == nil { + cmpw.allocator = value.NewFreelistAllocator() } - return c, nil + errs := cmpw.compare(nil) + if len(errs) > 0 { + return nil, errs + } + return cmpw.comparison, nil } // RemoveItems removes each provided list or map item from the value. 
@@ -166,63 +192,6 @@ func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue { return &tv } -// NormalizeUnions takes the new object and normalizes the union: -// - If discriminator changed to non-nil, and a new field has been added -// that doesn't match, an error is returned, -// - If discriminator hasn't changed and two fields or more are set, an -// error is returned, -// - If discriminator changed to non-nil, all other fields but the -// discriminated one will be cleared, -// - Otherwise, If only one field is left, update discriminator to that value. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnions(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnions(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) - } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - -// NormalizeUnionsApply specifically normalize unions on apply. It -// validates that the applied union is correct (there should be no -// ambiguity there), and clear the fields according to the sent intent. -// -// Please note: union behavior isn't finalized yet and this is still experimental. -func (tv TypedValue) NormalizeUnionsApply(new *TypedValue) (*TypedValue, error) { - var errs ValidationErrors - var normalizeFn = func(w *mergingWalker) { - if w.rhs != nil { - v := w.rhs.Unstructured() - w.out = &v - } - if err := normalizeUnionsApply(w); err != nil { - errs = append(errs, errorf(err.Error())...) - } - } - out, mergeErrs := merge(&tv, new, func(w *mergingWalker) {}, normalizeFn) - if mergeErrs != nil { - errs = append(errs, mergeErrs.(ValidationErrors)...) 
- } - if len(errs) > 0 { - return nil, errs - } - return out, nil -} - func (tv TypedValue) Empty() *TypedValue { tv.value = value.NewValueInterface(nil) return &tv @@ -278,50 +247,3 @@ func merge(lhs, rhs *TypedValue, rule, postRule mergeRule) (*TypedValue, error) } return out, nil } - -// Comparison is the return value of a TypedValue.Compare() operation. -// -// No field will appear in more than one of the three fieldsets. If all of the -// fieldsets are empty, then the objects must have been equal. -type Comparison struct { - // Removed contains any fields removed by rhs (the right-hand-side - // object in the comparison). - Removed *fieldpath.Set - // Modified contains fields present in both objects but different. - Modified *fieldpath.Set - // Added contains any fields added by rhs. - Added *fieldpath.Set -} - -// IsSame returns true if the comparison returned no changes (the two -// compared objects are similar). -func (c *Comparison) IsSame() bool { - return c.Removed.Empty() && c.Modified.Empty() && c.Added.Empty() -} - -// String returns a human readable version of the comparison. 
-func (c *Comparison) String() string { - bld := strings.Builder{} - if !c.Modified.Empty() { - bld.WriteString(fmt.Sprintf("- Modified Fields:\n%v\n", c.Modified)) - } - if !c.Added.Empty() { - bld.WriteString(fmt.Sprintf("- Added Fields:\n%v\n", c.Added)) - } - if !c.Removed.Empty() { - bld.WriteString(fmt.Sprintf("- Removed Fields:\n%v\n", c.Removed)) - } - return bld.String() -} - -// ExcludeFields fields from the compare recursively removes the fields -// from the entire comparison -func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { - if fields == nil || fields.Empty() { - return c - } - c.Removed = c.Removed.RecursiveDifference(fields) - c.Modified = c.Modified.RecursiveDifference(fields) - c.Added = c.Added.RecursiveDifference(fields) - return c -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go deleted file mode 100644 index 1fa5d88ae6..0000000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/union.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package typed - -import ( - "fmt" - "strings" - - "sigs.k8s.io/structured-merge-diff/v4/schema" - "sigs.k8s.io/structured-merge-diff/v4/value" -) - -func normalizeUnions(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - for _, union := range atom.Map.Unions { - if err := newUnion(&union).Normalize(old, w.rhs.AsMap(), value.NewValueInterface(*w.out).AsMap()); err != nil { - return err - } - } - return nil -} - -func normalizeUnionsApply(w *mergingWalker) error { - atom, found := w.schema.Resolve(w.typeRef) - if !found { - panic(fmt.Sprintf("Unable to resolve schema in normalize union: %v/%v", w.schema, w.typeRef)) - } - // Unions can only be in structures, and the struct must not have been removed - if atom.Map == nil || w.out == nil { - return nil - } - - var old value.Map - if w.lhs != nil && !w.lhs.IsNull() { - old = w.lhs.AsMap() - } - - for _, union := range atom.Map.Unions { - out := value.NewValueInterface(*w.out) - if err := newUnion(&union).NormalizeApply(old, w.rhs.AsMap(), out.AsMap()); err != nil { - return err - } - *w.out = out.Unstructured() - } - return nil -} - -type discriminated string -type field string - -type discriminatedNames struct { - f2d map[field]discriminated - d2f map[discriminated]field -} - -func newDiscriminatedName(f2d map[field]discriminated) discriminatedNames { - d2f := map[discriminated]field{} - for key, value := range f2d { - d2f[value] = key - } - return discriminatedNames{ - f2d: f2d, - d2f: d2f, - } -} - -func (dn discriminatedNames) toField(d discriminated) field { - if f, ok := dn.d2f[d]; ok { - return f - } - return field(d) -} - -func (dn discriminatedNames) 
toDiscriminated(f field) discriminated { - if d, ok := dn.f2d[f]; ok { - return d - } - return discriminated(f) -} - -type discriminator struct { - name string -} - -func (d *discriminator) Set(m value.Map, v discriminated) { - if d == nil { - return - } - m.Set(d.name, value.NewValueInterface(string(v))) -} - -func (d *discriminator) Get(m value.Map) discriminated { - if d == nil || m == nil { - return "" - } - val, ok := m.Get(d.name) - if !ok { - return "" - } - if !val.IsString() { - return "" - } - return discriminated(val.AsString()) -} - -type fieldsSet map[field]struct{} - -// newFieldsSet returns a map of the fields that are part of the union and are set -// in the given map. -func newFieldsSet(m value.Map, fields []field) fieldsSet { - if m == nil { - return nil - } - set := fieldsSet{} - for _, f := range fields { - if subField, ok := m.Get(string(f)); ok && !subField.IsNull() { - set.Add(f) - } - } - return set -} - -func (fs fieldsSet) Add(f field) { - if fs == nil { - fs = map[field]struct{}{} - } - fs[f] = struct{}{} -} - -func (fs fieldsSet) One() *field { - for f := range fs { - return &f - } - return nil -} - -func (fs fieldsSet) Has(f field) bool { - _, ok := fs[f] - return ok -} - -func (fs fieldsSet) List() []field { - fields := []field{} - for f := range fs { - fields = append(fields, f) - } - return fields -} - -func (fs fieldsSet) Difference(o fieldsSet) fieldsSet { - n := fieldsSet{} - for f := range fs { - if !o.Has(f) { - n.Add(f) - } - } - return n -} - -func (fs fieldsSet) String() string { - s := []string{} - for k := range fs { - s = append(s, string(k)) - } - return strings.Join(s, ", ") -} - -type union struct { - deduceInvalidDiscriminator bool - d *discriminator - dn discriminatedNames - f []field -} - -func newUnion(su *schema.Union) *union { - u := &union{} - if su.Discriminator != nil { - u.d = &discriminator{name: *su.Discriminator} - } - f2d := map[field]discriminated{} - for _, f := range su.Fields { - u.f = append(u.f, 
field(f.FieldName)) - f2d[field(f.FieldName)] = discriminated(f.DiscriminatorValue) - } - u.dn = newDiscriminatedName(f2d) - u.deduceInvalidDiscriminator = su.DeduceInvalidDiscriminator - return u -} - -// clear removes all the fields in map that are part of the union, but -// the one we decided to keep. -func (u *union) clear(m value.Map, f field) { - for _, fieldName := range u.f { - if field(fieldName) != f { - m.Delete(string(fieldName)) - } - } -} - -func (u *union) Normalize(old, new, out value.Map) error { - os := newFieldsSet(old, u.f) - ns := newFieldsSet(new, u.f) - diff := ns.Difference(os) - - if u.d.Get(old) != u.d.Get(new) && u.d.Get(new) != "" { - if len(diff) == 1 && u.d.Get(new) != u.dn.toDiscriminated(*diff.One()) { - return fmt.Errorf("discriminator (%v) and field changed (%v) don't match", u.d.Get(new), diff.One()) - } - if len(diff) > 1 { - return fmt.Errorf("multiple new fields added: %v", diff) - } - u.clear(out, u.dn.toField(u.d.Get(new))) - return nil - } - - if len(ns) > 1 { - return fmt.Errorf("multiple fields set without discriminator change: %v", ns) - } - - // Set discriminiator if it needs to be deduced. - if u.deduceInvalidDiscriminator && len(ns) == 1 { - u.d.Set(out, u.dn.toDiscriminated(*ns.One())) - } - - return nil -} - -func (u *union) NormalizeApply(applied, merged, out value.Map) error { - as := newFieldsSet(applied, u.f) - if len(as) > 1 { - return fmt.Errorf("more than one field of union applied: %v", as) - } - if len(as) == 0 { - // None is set, just leave. - return nil - } - // We have exactly one, discriminiator must match if set - if u.d.Get(applied) != "" && u.d.Get(applied) != u.dn.toDiscriminated(*as.One()) { - return fmt.Errorf("applied discriminator (%v) doesn't match applied field (%v)", u.d.Get(applied), *as.One()) - } - - // Update discriminiator if needed - if u.deduceInvalidDiscriminator { - u.d.Set(out, u.dn.toDiscriminated(*as.One())) - } - // Clear others fields. 
- u.clear(out, *as.One()) - - return nil -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go index edddbafa42..652e24c819 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/validate.go @@ -33,6 +33,7 @@ func (tv TypedValue) walker() *validatingObjectWalker { v.value = tv.value v.schema = tv.schema v.typeRef = tv.typeRef + v.allowDuplicates = false if v.allocator == nil { v.allocator = value.NewFreelistAllocator() } @@ -49,6 +50,9 @@ type validatingObjectWalker struct { value value.Value schema *schema.Schema typeRef schema.TypeRef + // If set to true, duplicates will be allowed in + // associativeLists/sets. + allowDuplicates bool // Allocate only as many walkers as needed for the depth by storing them here. spareWalkers *[]*validatingObjectWalker @@ -129,7 +133,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) pe.Index = &i } else { var err error - pe, err = listItemToPathElement(v.allocator, v.schema, t, i, child) + pe, err = listItemToPathElement(v.allocator, v.schema, t, child) if err != nil { errs = append(errs, errorf("element %v: %v", i, err.Error())...) // If we can't construct the path element, we can't @@ -137,7 +141,7 @@ func (v *validatingObjectWalker) visitListItems(t *schema.List, list value.List) // this element. return } - if observedKeys.Has(pe) { + if observedKeys.Has(pe) && !v.allowDuplicates { errs = append(errs, errorf("duplicate entries for key %v", pe.String())...) } observedKeys.Insert(pe)