diff --git a/api/go.mod b/api/go.mod index dc90d841c..6b2ed23e0 100644 --- a/api/go.mod +++ b/api/go.mod @@ -4,9 +4,8 @@ go 1.16 require ( github.com/fluxcd/pkg/apis/kustomize v0.1.0 - github.com/fluxcd/pkg/apis/meta v0.10.0 - github.com/fluxcd/pkg/runtime v0.12.0 - k8s.io/apiextensions-apiserver v0.22.1 - k8s.io/apimachinery v0.22.1 + github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 + k8s.io/apiextensions-apiserver v0.22.2 + k8s.io/apimachinery v0.22.2 sigs.k8s.io/controller-runtime v0.9.5 ) diff --git a/api/go.sum b/api/go.sum index 3e8ada233..a42a6b3dd 100644 --- a/api/go.sum +++ b/api/go.sum @@ -113,10 +113,8 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fluxcd/pkg/apis/kustomize v0.1.0 h1:sauL+KHmZ0zV2ZgpsLMyDzCQudBTtaFzSys+rXn9g9w= github.com/fluxcd/pkg/apis/kustomize v0.1.0/go.mod h1:gEl+W5cVykCC3RfrCaqe+Pz+j4lKl2aeR4dxsom/zII= -github.com/fluxcd/pkg/apis/meta v0.10.0 h1:N7wVGHC1cyPdT87hrDC7UwCwRwnZdQM46PBSLjG2rlE= -github.com/fluxcd/pkg/apis/meta v0.10.0/go.mod h1:CW9X9ijMTpNe7BwnokiUOrLl/h13miwVr/3abEQLbKE= -github.com/fluxcd/pkg/runtime v0.12.0 h1:BPZZ8bBkimpqGAPXqOf3LTaw+tcw6HgbWyCuzbbsJGs= -github.com/fluxcd/pkg/runtime v0.12.0/go.mod h1:EyaTR2TOYcjL5U//C4yH3bt2tvTgIOSXpVRbWxUn/C4= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1 h1:RHHrztAFv9wmjM+Pk7Svt1UdD+1SdnQSp76MWFiM7Hg= +github.com/fluxcd/pkg/apis/meta v0.11.0-rc.1/go.mod h1:yUblM2vg+X8TE3A2VvJfdhkGmg+uqBlSPkLk7dxi0UM= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -230,11 +228,9 @@ github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBt github.com/hashicorp/consul/sdk v0.1.1/go.mod 
h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -329,14 +325,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= 
-github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -777,28 +771,29 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= -k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY= -k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY= +k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= +k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= k8s.io/apiextensions-apiserver v0.21.1/go.mod h1:KESQFCGjqVcVsZ9g0xX5bacMjyX5emuWcS2arzdEouA= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= -k8s.io/apiextensions-apiserver v0.22.1 h1:YSJYzlFNFSfUle+yeEXX0lSQyLEoxoPJySRupepb0gE= -k8s.io/apiextensions-apiserver v0.22.1/go.mod h1:HeGmorjtRmRLE+Q8dJu6AYRoZccvCMsghwS8XTUYb2c= +k8s.io/apiextensions-apiserver v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4= +k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA= k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY= +k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= -k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM= -k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.2 
h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= +k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apiserver v0.21.1/go.mod h1:nLLYZvMWn35glJ4/FZRhzLG/3MPxAaZTgV4FJZdr+tY= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= -k8s.io/apiserver v0.22.1/go.mod h1:2mcM6dzSt+XndzVQJX21Gx0/Klo7Aen7i0Ai6tIa400= +k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= -k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk= +k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= k8s.io/code-generator v0.21.1/go.mod h1:hUlps5+9QaTrKx+jiM4rmq7YmH8wPOIko64uZCHDh6Q= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= -k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/component-base v0.21.1/go.mod h1:NgzFZ2qu4m1juby4TnrmpR8adRk6ka62YdH5DkIIyKA= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= -k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= +k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -809,17 +804,15 @@ k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi 
v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= sigs.k8s.io/controller-runtime v0.9.5 h1:WThcFE6cqctTn2jCZprLICO6BaKZfhsT37uAapTNfxc= sigs.k8s.io/controller-runtime v0.9.5/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/api/v2beta1/condition_types.go b/api/v2beta1/condition_types.go index 2aed2bb15..4dda151bf 100644 --- a/api/v2beta1/condition_types.go +++ b/api/v2beta1/condition_types.go @@ -29,6 +29,13 @@ const ( // 
(uninstall/rollback) due to a failure of the last release attempt against the // latest desired state. RemediatedCondition string = "Remediated" + + // InitFailedCondition represents the failure of the initial setup steps of the helm release. + InitFailedCondition string = "InitFailed" + + // RetriesExhaustedCondition represents the failures when the maximum number + // of retries has been reached. + RetriesExhaustedCondition string = "RetriesExhausted" ) const ( @@ -83,4 +90,12 @@ const ( // GetLastReleaseFailedReason represents the fact that observing the last // release failed. GetLastReleaseFailedReason string = "GetLastReleaseFailed" + + // GetHelmChartFailedReason represents the fact that the Helm chart for + // the release could not be fetched. + GetHelmChartFailedReason string = "GetHelmChartFailed" + + // DependencyNotReadyReason represents the fact that resources the Helm release + // depends on are not ready. + DependencyNotReadyReason string = "DependencyNotReady" ) diff --git a/api/v2beta1/helmrelease_types.go b/api/v2beta1/helmrelease_types.go index 3dca93e34..6332c1120 100644 --- a/api/v2beta1/helmrelease_types.go +++ b/api/v2beta1/helmrelease_types.go @@ -27,7 +27,6 @@ import ( "github.com/fluxcd/pkg/apis/kustomize" "github.com/fluxcd/pkg/apis/meta" - "github.com/fluxcd/pkg/runtime/dependency" ) const HelmReleaseKind = "HelmRelease" @@ -102,11 +101,11 @@ type HelmReleaseSpec struct { // +optional StorageNamespace string `json:"storageNamespace,omitempty"` - // DependsOn may contain a dependency.CrossNamespaceDependencyReference slice with + // DependsOn may contain a meta.NamespacedObjectReference slice with // references to HelmRelease resources that must be ready before this HelmRelease // can be reconciled.
// +optional - DependsOn []dependency.CrossNamespaceDependencyReference `json:"dependsOn,omitempty"` + DependsOn []meta.NamespacedObjectReference `json:"dependsOn,omitempty"` // Timeout is the time to wait for any individual Kubernetes operation (like Jobs // for hooks) during the performance of a Helm action. Defaults to '5m0s'. @@ -246,15 +245,6 @@ type HelmChartTemplateSpec struct { // +optional Interval *metav1.Duration `json:"interval,omitempty"` - // Determines what enables the creation of a new artifact. Valid values are - // ('ChartVersion', 'Revision'). - // See the documentation of the values for an explanation on their behavior. - // Defaults to ChartVersion when omitted. - // +kubebuilder:validation:Enum=ChartVersion;Revision - // +kubebuilder:default:=ChartVersion - // +optional - ReconcileStrategy string `json:"reconcileStrategy,omitempty"` - // Alternative list of values files to use as the chart values (values.yaml // is not included by default), expected to be a relative path in the SourceRef. // Values files are merged in the order of this list with the last file overriding @@ -304,9 +294,9 @@ type Remediation interface { MustIgnoreTestFailures(bool) bool MustRemediateLastFailure() bool GetStrategy() RemediationStrategy - GetFailureCount(hr HelmRelease) int64 + GetFailureCount(hr *HelmRelease) int64 IncrementFailureCount(hr *HelmRelease) - RetriesExhausted(hr HelmRelease) bool + RetriesExhausted(hr *HelmRelease) bool } // Install holds the configuration for Helm install actions performed for this @@ -456,7 +446,7 @@ func (in InstallRemediation) GetStrategy() RemediationStrategy { } // GetFailureCount gets the failure count. 
-func (in InstallRemediation) GetFailureCount(hr HelmRelease) int64 { +func (in InstallRemediation) GetFailureCount(hr *HelmRelease) int64 { return hr.Status.InstallFailures } @@ -466,7 +456,7 @@ func (in InstallRemediation) IncrementFailureCount(hr *HelmRelease) { } // RetriesExhausted returns true if there are no remaining retries. -func (in InstallRemediation) RetriesExhausted(hr HelmRelease) bool { +func (in InstallRemediation) RetriesExhausted(hr *HelmRelease) bool { return in.Retries >= 0 && in.GetFailureCount(hr) > int64(in.Retries) } @@ -635,7 +625,7 @@ func (in UpgradeRemediation) GetStrategy() RemediationStrategy { } // GetFailureCount gets the failure count. -func (in UpgradeRemediation) GetFailureCount(hr HelmRelease) int64 { +func (in UpgradeRemediation) GetFailureCount(hr *HelmRelease) int64 { return hr.Status.UpgradeFailures } @@ -645,7 +635,7 @@ func (in UpgradeRemediation) IncrementFailureCount(hr *HelmRelease) { } // RetriesExhausted returns true if there are no remaining retries. -func (in UpgradeRemediation) RetriesExhausted(hr HelmRelease) bool { +func (in UpgradeRemediation) RetriesExhausted(hr *HelmRelease) bool { return in.Retries >= 0 && in.GetFailureCount(hr) > int64(in.Retries) } @@ -824,36 +814,9 @@ func (in HelmReleaseStatus) GetHelmChart() (string, string) { return split[0], split[1] } -// HelmReleaseProgressing resets any failures and registers progress toward -// reconciling the given HelmRelease by setting the meta.ReadyCondition to -// 'Unknown' for meta.ProgressingReason. -func HelmReleaseProgressing(hr HelmRelease) HelmRelease { - hr.Status.Conditions = []metav1.Condition{} - meta.SetResourceCondition(&hr, meta.ReadyCondition, metav1.ConditionUnknown, meta.ProgressingReason, - "Reconciliation in progress") - resetFailureCounts(&hr) - return hr -} - -// HelmReleaseNotReady registers a failed reconciliation of the given HelmRelease. 
-func HelmReleaseNotReady(hr HelmRelease, reason, message string) HelmRelease { - meta.SetResourceCondition(&hr, meta.ReadyCondition, metav1.ConditionFalse, reason, message) - hr.Status.Failures++ - return hr -} - -// HelmReleaseReady registers a successful reconciliation of the given HelmRelease. -func HelmReleaseReady(hr HelmRelease) HelmRelease { - meta.SetResourceCondition(&hr, meta.ReadyCondition, metav1.ConditionTrue, meta.ReconciliationSucceededReason, - "Release reconciliation succeeded") - hr.Status.LastAppliedRevision = hr.Status.LastAttemptedRevision - resetFailureCounts(&hr) - return hr -} - // HelmReleaseAttempted registers an attempt of the given HelmRelease with the given state. // and returns the modified HelmRelease and a boolean indicating a state change. -func HelmReleaseAttempted(hr HelmRelease, revision string, releaseRevision int, valuesChecksum string) (HelmRelease, bool) { +func HelmReleaseAttempted(hr *HelmRelease, revision string, releaseRevision int, valuesChecksum string) (*HelmRelease, bool) { changed := hr.Status.LastAttemptedRevision != revision || hr.Status.LastReleaseRevision != releaseRevision || hr.Status.LastAttemptedValuesChecksum != valuesChecksum @@ -864,12 +827,6 @@ func HelmReleaseAttempted(hr HelmRelease, revision string, releaseRevision int, return hr, changed } -func resetFailureCounts(hr *HelmRelease) { - hr.Status.Failures = 0 - hr.Status.InstallFailures = 0 - hr.Status.UpgradeFailures = 0 -} - const ( // SourceIndexKey is the key used for indexing HelmReleases based on // their sources. @@ -895,6 +852,18 @@ type HelmRelease struct { Status HelmReleaseStatus `json:"status,omitempty"` } +// IncrementFailureCounter adds one to the failure counter. +func (in *HelmRelease) IncrementFailureCounter() { + in.Status.Failures++ +} + +// ResetFailureCounter sets all failure counters back to zero. 
+func (in *HelmRelease) ResetFailureCounter() { + in.Status.Failures = 0 + in.Status.InstallFailures = 0 + in.Status.UpgradeFailures = 0 +} + // GetValues unmarshals the raw values to a map[string]interface{} and returns // the result. func (in HelmRelease) GetValues() map[string]interface{} { @@ -958,7 +927,7 @@ func (in HelmRelease) GetMaxHistory() int { // GetDependsOn returns the types.NamespacedName of the HelmRelease, and a // dependency.CrossNamespaceDependencyReference slice it depends on. -func (in HelmRelease) GetDependsOn() (types.NamespacedName, []dependency.CrossNamespaceDependencyReference) { +func (in HelmRelease) GetDependsOn() (types.NamespacedName, []meta.NamespacedObjectReference) { return types.NamespacedName{ Namespace: in.Namespace, Name: in.Namespace, @@ -966,10 +935,21 @@ func (in HelmRelease) GetDependsOn() (types.NamespacedName, []dependency.CrossNa } // GetStatusConditions returns a pointer to the Status.Conditions slice +// Deprecated: use GetConditions instead. func (in *HelmRelease) GetStatusConditions() *[]metav1.Condition { return &in.Status.Conditions } +// GetConditions returns the status conditions of the object. +func (in *HelmRelease) GetConditions() []metav1.Condition { + return in.Status.Conditions +} + +// SetConditions sets the status conditions on the object. +func (in *HelmRelease) SetConditions(conditions []metav1.Condition) { + in.Status.Conditions = conditions +} + // +kubebuilder:object:root=true // HelmReleaseList contains a list of HelmRelease objects. 
diff --git a/api/v2beta1/zz_generated.deepcopy.go b/api/v2beta1/zz_generated.deepcopy.go index 194dc0cc5..bc2b23c35 100644 --- a/api/v2beta1/zz_generated.deepcopy.go +++ b/api/v2beta1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v2beta1 import ( "github.com/fluxcd/pkg/apis/kustomize" - "github.com/fluxcd/pkg/runtime/dependency" + "github.com/fluxcd/pkg/apis/meta" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -156,7 +156,7 @@ func (in *HelmReleaseSpec) DeepCopyInto(out *HelmReleaseSpec) { } if in.DependsOn != nil { in, out := &in.DependsOn, &out.DependsOn - *out = make([]dependency.CrossNamespaceDependencyReference, len(*in)) + *out = make([]meta.NamespacedObjectReference, len(*in)) copy(*out, *in) } if in.Timeout != nil { diff --git a/config/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml b/config/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml index 067a70d48..137025142 100644 --- a/config/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml +++ b/config/crd/bases/helm.toolkit.fluxcd.io_helmreleases.yaml @@ -56,13 +56,6 @@ spec: interval: description: Interval at which to check the v1beta1.Source for updates. Defaults to 'HelmReleaseSpec.Interval'. type: string - reconcileStrategy: - default: ChartVersion - description: Determines what enables the creation of a new artifact. Valid values are ('ChartVersion', 'Revision'). See the documentation of the values for an explanation on their behavior. Defaults to ChartVersion when omitted. - enum: - - ChartVersion - - Revision - type: string sourceRef: description: The name and namespace of the v1beta1.Source the chart is available at. properties: @@ -109,15 +102,15 @@ spec: - spec type: object dependsOn: - description: DependsOn may contain a dependency.CrossNamespaceDependencyReference slice with references to HelmRelease resources that must be ready before this HelmRelease can be reconciled. 
+ description: DependsOn may contain a meta.NamespacedObjectReference slice with references to HelmRelease resources that must be ready before this HelmRelease can be reconciled. items: - description: CrossNamespaceDependencyReference holds the reference to a dependency. + description: NamespacedObjectReference contains enough information to locate the referenced Kubernetes resource object in any namespace. properties: name: - description: Name holds the name reference of a dependency. + description: Name of the referent. type: string namespace: - description: Namespace holds the namespace reference of a dependency. + description: Namespace of the referent, when not specified it acts as LocalObjectReference. type: string required: - name @@ -181,7 +174,7 @@ spec: description: SecretRef holds the name to a secret that contains a 'value' key with the kubeconfig file as the value. It must be in the same namespace as the HelmRelease. It is recommended that the kubeconfig is self-contained, and the secret is regularly updated if credentials such as a cloud-access-token expire. Cloud specific `cmd-path` auth helpers will not function without adding binaries and credentials to the Pod that is responsible for reconciling the HelmRelease. properties: name: - description: Name of the referent + description: Name of the referent. type: string required: - name @@ -523,7 +516,7 @@ spec: description: LastAttemptedValuesChecksum is the SHA1 checksum of the values of the last reconciliation attempt. type: string lastHandledReconcileAt: - description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected. + description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change of the annotation value can be detected. type: string lastReleaseRevision: description: LastReleaseRevision is the revision of the last successful Helm release. 
diff --git a/controllers/helmrelease_controller.go b/controllers/helmrelease_controller.go index 6e2a47ffc..375e48009 100644 --- a/controllers/helmrelease_controller.go +++ b/controllers/helmrelease_controller.go @@ -31,14 +31,11 @@ import ( "helm.sh/helm/v3/pkg/strvals" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/cli-runtime/pkg/genericclioptions" "k8s.io/client-go/rest" - kuberecorder "k8s.io/client-go/tools/record" - "k8s.io/client-go/tools/reference" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -50,8 +47,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/source" "github.com/fluxcd/pkg/apis/meta" + "github.com/fluxcd/pkg/runtime/conditions" + helper "github.com/fluxcd/pkg/runtime/controller" "github.com/fluxcd/pkg/runtime/events" - "github.com/fluxcd/pkg/runtime/metrics" + "github.com/fluxcd/pkg/runtime/patch" "github.com/fluxcd/pkg/runtime/predicates" "github.com/fluxcd/pkg/runtime/transform" sourcev1 "github.com/fluxcd/source-controller/api/v1beta1" @@ -72,13 +71,18 @@ import ( // HelmReleaseReconciler reconciles a HelmRelease object type HelmReleaseReconciler struct { client.Client - httpClient *retryablehttp.Client - Config *rest.Config - Scheme *runtime.Scheme - requeueDependency time.Duration - EventRecorder kuberecorder.EventRecorder - ExternalEventRecorder *events.Recorder - MetricsRecorder *metrics.Recorder + helper.Events + helper.Metrics + + Config *rest.Config + httpClient *retryablehttp.Client + requeueDependency time.Duration +} + +type HelmReleaseReconcilerOptions struct { + MaxConcurrentReconciles int + HTTPRetry int + DependencyRequeueInterval time.Duration } func (r *HelmReleaseReconciler) SetupWithManager(mgr 
ctrl.Manager, opts HelmReleaseReconcilerOptions) error { @@ -128,170 +132,216 @@ func (c ConditionError) Error() string { return c.Err.Error() } -func (r *HelmReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { +func (r *HelmReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, retErr error) { start := time.Now() log := logr.FromContext(ctx) - var hr v2.HelmRelease - if err := r.Get(ctx, req.NamespacedName, &hr); err != nil { + obj := &v2.HelmRelease{} + if err := r.Get(ctx, req.NamespacedName, obj); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } - // record suspension metrics - defer r.recordSuspension(ctx, hr) - - // Add our finalizer if it does not exist - if !controllerutil.ContainsFinalizer(&hr, v2.HelmReleaseFinalizer) { - controllerutil.AddFinalizer(&hr, v2.HelmReleaseFinalizer) - if err := r.Update(ctx, &hr); err != nil { - log.Error(err, "unable to register finalizer") - return ctrl.Result{}, err - } - } - - // Examine if the object is under deletion - if !hr.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, hr) - } + r.RecordSuspend(ctx, obj, obj.Spec.Suspend) // Return early if the HelmRelease is suspended. - if hr.Spec.Suspend { + if obj.Spec.Suspend { log.Info("Reconciliation is suspended for this object") return ctrl.Result{}, nil } - hr, result, err := r.reconcile(ctx, hr) - - // Update status after reconciliation. 
- if updateStatusErr := r.patchStatus(ctx, &hr); updateStatusErr != nil { - log.Error(updateStatusErr, "unable to update status after reconciliation") - return ctrl.Result{Requeue: true}, updateStatusErr + // Initialize the patch helper + patchHelper, err := patch.NewHelper(obj, r.Client) + if err != nil { + return ctrl.Result{}, err } - // Record ready status - r.recordReadiness(ctx, hr) + // Always attempt to patch the object and status after each reconciliation + defer func() { + // Record the value of the reconciliation request, if any + if v, ok := meta.ReconcileAnnotationValue(obj.GetAnnotations()); ok { + obj.Status.SetLastHandledReconcileRequest(v) + } - // Log reconciliation duration - durationMsg := fmt.Sprintf("reconcilation finished in %s", time.Now().Sub(start).String()) - if result.RequeueAfter > 0 { - durationMsg = fmt.Sprintf("%s, next run in %s", durationMsg, result.RequeueAfter.String()) - } - log.Info(durationMsg) + // TODO: Handle test condition when tests are enabled + // Summarize Ready condition + conditions.SetSummary(obj, + meta.ReadyCondition, + conditions.WithConditions( + v2.ReleasedCondition, + //v2.TestSuccessCondition, + v2.RemediatedCondition, + v2.InitFailedCondition, + v2.RetriesExhaustedCondition, + ), + conditions.WithNegativePolarityConditions( + v2.RemediatedCondition, + v2.InitFailedCondition, + v2.RetriesExhaustedCondition, + ), + ) + + // Patch the object, ignoring conflicts on the conditions owned by this controller + patchOpts := []patch.Option{ + patch.WithOwnedConditions{ + Conditions: []string{ + v2.ReleasedCondition, + v2.TestSuccessCondition, + v2.RemediatedCondition, + v2.InitFailedCondition, + v2.RetriesExhaustedCondition, + meta.ReadyCondition, + meta.ReconcilingCondition, + meta.StalledCondition, + }, + }, + } - return result, err -} + // Determine if the resource is still being reconciled, or if it has stalled, and record this observation + if retErr == nil && (result.IsZero() || !result.Requeue) { + // We are no 
longer reconciling + conditions.Delete(obj, meta.ReconcilingCondition) + + // We have now observed this generation + patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + + readyCondition := conditions.Get(obj, meta.ReadyCondition) + switch readyCondition.Status { + case metav1.ConditionFalse: + // As we are no longer reconciling and the end-state + // is not ready, the reconciliation has stalled + conditions.MarkStalled(obj, readyCondition.Reason, readyCondition.Message) + case metav1.ConditionTrue: + // As we are no longer reconciling and the end-state + // is ready, the reconciliation is no longer stalled + conditions.Delete(obj, meta.StalledCondition) + } + } -func (r *HelmReleaseReconciler) reconcile(ctx context.Context, hr v2.HelmRelease) (v2.HelmRelease, ctrl.Result, error) { - reconcileStart := time.Now() - log := logr.FromContext(ctx) - // Record the value of the reconciliation request, if any - if v, ok := meta.ReconcileAnnotationValue(hr.GetAnnotations()); ok { - hr.Status.SetLastHandledReconcileRequest(v) - } - - // Observe HelmRelease generation. 
- if hr.Status.ObservedGeneration != hr.Generation { - hr.Status.ObservedGeneration = hr.Generation - hr = v2.HelmReleaseProgressing(hr) - if updateStatusErr := r.patchStatus(ctx, &hr); updateStatusErr != nil { - log.Error(updateStatusErr, "unable to update status after generation update") - return hr, ctrl.Result{Requeue: true}, updateStatusErr + // Finally, patch the resource + if err := patchHelper.Patch(ctx, obj, patchOpts...); err != nil { + retErr = kerrors.NewAggregate([]error{retErr, err}) } - // Record progressing status - r.recordReadiness(ctx, hr) + + // Always record readiness and duration metrics + r.Metrics.RecordReadiness(ctx, obj) + r.Metrics.RecordDuration(ctx, obj, start) + }() + + // Add finalizer first if not exist to avoid the race condition + // between init and delete + if !controllerutil.ContainsFinalizer(obj, v2.HelmReleaseFinalizer) { + controllerutil.AddFinalizer(obj, v2.HelmReleaseFinalizer) + return ctrl.Result{Requeue: true}, nil } - // Record reconciliation duration - if r.MetricsRecorder != nil { - objRef, err := reference.GetReference(r.Scheme, &hr) - if err != nil { - return hr, ctrl.Result{Requeue: true}, err - } - defer r.MetricsRecorder.RecordDuration(*objRef, reconcileStart) + // Examine if the object is under deletion + if !obj.ObjectMeta.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, obj) } + // Reconcile actual object + return r.reconcile(ctx, obj) +} + +func (r *HelmReleaseReconciler) reconcile(ctx context.Context, obj *v2.HelmRelease) (ctrl.Result, error) { + log := logr.FromContext(ctx) + + // Mark the resource as under reconciliation + conditions.MarkReconciling(obj, meta.ProgressingReason, "") + + // Remove InitFailedCondition + conditions.Delete(obj, v2.InitFailedCondition) + // Reconcile chart based on the HelmChartTemplate - hc, reconcileErr := r.reconcileChart(ctx, &hr) + hc, reconcileErr := r.reconcileChart(ctx, obj) if reconcileErr != nil { - msg := fmt.Sprintf("chart reconciliation failed: %s",
reconcileErr.Error()) - r.event(ctx, hr, hr.Status.LastAttemptedRevision, events.EventSeverityError, msg) - return v2.HelmReleaseNotReady(hr, v2.ArtifactFailedReason, msg), ctrl.Result{Requeue: true}, reconcileErr + obj.IncrementFailureCounter() + conditions.MarkTrue(obj, v2.InitFailedCondition, v2.ArtifactFailedReason, "Chart reconciliation failed: %s", reconcileErr.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, v2.ArtifactFailedReason, "Chart reconciliation failed: %s", reconcileErr.Error()) + return ctrl.Result{Requeue: true}, reconcileErr } - // Check chart readiness - if hc.Generation != hc.Status.ObservedGeneration || !apimeta.IsStatusConditionTrue(hc.Status.Conditions, meta.ReadyCondition) { + if !conditions.IsReady(hc) { msg := fmt.Sprintf("HelmChart '%s/%s' is not ready", hc.GetNamespace(), hc.GetName()) - r.event(ctx, hr, hr.Status.LastAttemptedRevision, events.EventSeverityInfo, msg) + obj.IncrementFailureCounter() + conditions.MarkTrue(obj, v2.InitFailedCondition, v2.ArtifactFailedReason, msg) + r.Eventf(ctx, obj, events.EventSeverityError, v2.ArtifactFailedReason, msg) + log.Info(msg) // Do not requeue immediately, when the artifact is created // the watcher should trigger a reconciliation.
- return v2.HelmReleaseNotReady(hr, v2.ArtifactFailedReason, msg), ctrl.Result{RequeueAfter: hc.Spec.Interval.Duration}, nil + return ctrl.Result{RequeueAfter: hc.Spec.Interval.Duration}, nil } // Check dependencies - if len(hr.Spec.DependsOn) > 0 { - if err := r.checkDependencies(hr); err != nil { - msg := fmt.Sprintf("dependencies do not meet ready condition (%s), retrying in %s", - err.Error(), r.requeueDependency.String()) - r.event(ctx, hr, hc.GetArtifact().Revision, events.EventSeverityInfo, msg) + if len(obj.Spec.DependsOn) > 0 { + if err := r.checkDependencies(ctx, obj); err != nil { + msg := fmt.Sprintf("dependencies do not meet ready condition (%s), retrying in %s", err.Error(), r.requeueDependency.String()) + obj.IncrementFailureCounter() + conditions.MarkTrue(obj, v2.InitFailedCondition, v2.DependencyNotReadyReason, msg) + r.Eventf(ctx, obj, events.EventSeverityError, v2.DependencyNotReadyReason, msg) log.Info(msg) - // Exponential backoff would cause execution to be prolonged too much, // instead we requeue on a fixed interval. 
- return v2.HelmReleaseNotReady(hr, - meta.DependencyNotReadyReason, err.Error()), ctrl.Result{RequeueAfter: r.requeueDependency}, nil + return ctrl.Result{RequeueAfter: r.requeueDependency}, nil } log.Info("all dependencies are ready, proceeding with release") } // Compose values - values, err := r.composeValues(ctx, hr) + values, err := r.composeValues(ctx, obj) if err != nil { - r.event(ctx, hr, hr.Status.LastAttemptedRevision, events.EventSeverityError, err.Error()) - return v2.HelmReleaseNotReady(hr, v2.InitFailedReason, err.Error()), ctrl.Result{Requeue: true}, nil + obj.IncrementFailureCounter() + conditions.MarkTrue(obj, v2.InitFailedCondition, v2.InitFailedReason, "could not get chart values: %s", err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, v2.InitFailedReason, "could not get chart values: %s", err.Error()) + return ctrl.Result{Requeue: true}, nil } // Load chart from artifact chart, err := r.loadHelmChart(hc) if err != nil { - r.event(ctx, hr, hr.Status.LastAttemptedRevision, events.EventSeverityError, err.Error()) - return v2.HelmReleaseNotReady(hr, v2.ArtifactFailedReason, err.Error()), ctrl.Result{Requeue: true}, nil + obj.IncrementFailureCounter() + conditions.MarkTrue(obj, v2.InitFailedCondition, v2.ArtifactFailedReason, "could not load chart: %s", err.Error()) + r.Eventf(ctx, obj, events.EventSeverityError, v2.ArtifactFailedReason, "could not load chart: %s", err.Error()) + return ctrl.Result{Requeue: true}, nil } // Reconcile Helm release - reconciledHr, reconcileErr := r.reconcileRelease(ctx, *hr.DeepCopy(), chart, values) - if reconcileErr != nil { - r.event(ctx, hr, hc.GetArtifact().Revision, events.EventSeverityError, - fmt.Sprintf("reconciliation failed: %s", reconcileErr.Error())) + if result, err := r.reconcileRelease(ctx, obj, chart, values); err != nil { + obj.IncrementFailureCounter() + // TODO: Set a better reason + r.Eventf(ctx, obj, events.EventSeverityError, v2.ArtifactFailedReason, "reconciliation failed: %s", 
err.Error()) + return result, err } - return reconciledHr, ctrl.Result{RequeueAfter: hr.Spec.Interval.Duration}, reconcileErr + return ctrl.Result{RequeueAfter: obj.Spec.Interval.Duration}, nil } -type HelmReleaseReconcilerOptions struct { - MaxConcurrentReconciles int - HTTPRetry int - DependencyRequeueInterval time.Duration -} - -func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, - hr v2.HelmRelease, chart *chart.Chart, values chartutil.Values) (v2.HelmRelease, error) { +func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, hr *v2.HelmRelease, chart *chart.Chart, values chartutil.Values) (ctrl.Result, error) { log := logr.FromContext(ctx) // Initialize Helm action runner getter, err := r.getRESTClientGetter(ctx, hr) if err != nil { - return v2.HelmReleaseNotReady(hr, v2.InitFailedReason, err.Error()), err + hr.IncrementFailureCounter() + conditions.MarkTrue(hr, v2.InitFailedCondition, v2.InitFailedReason, err.Error()) + r.Eventf(ctx, hr, events.EventSeverityError, v2.InitFailedReason, err.Error()) + return ctrl.Result{}, err } run, err := runner.NewRunner(getter, hr.GetStorageNamespace(), log) if err != nil { - return v2.HelmReleaseNotReady(hr, v2.InitFailedReason, "failed to initialize Helm action runner"), err + hr.IncrementFailureCounter() + conditions.MarkTrue(hr, v2.InitFailedCondition, v2.InitFailedReason, "failed to initialize Helm action runner: %s", err) + r.Eventf(ctx, hr, events.EventSeverityError, v2.InitFailedReason, "failed to initialize Helm action runner: %s", err) + return ctrl.Result{}, err } // Determine last release revision. 
- rel, observeLastReleaseErr := run.ObserveLastRelease(hr) + rel, observeLastReleaseErr := run.ObserveLastRelease(*hr) if observeLastReleaseErr != nil { + hr.IncrementFailureCounter() err = fmt.Errorf("failed to get last release revision: %w", observeLastReleaseErr) - return v2.HelmReleaseNotReady(hr, v2.GetLastReleaseFailedReason, "failed to get last release revision"), err + conditions.MarkTrue(hr, v2.InitFailedCondition, v2.GetLastReleaseFailedReason, err.Error()) + r.Eventf(ctx, hr, events.EventSeverityError, v2.GetLastReleaseFailedReason, err.Error()) + return ctrl.Result{}, err } // Register the current release attempt. @@ -300,58 +350,60 @@ func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, valuesChecksum := util.ValuesChecksum(values) hr, hasNewState := v2.HelmReleaseAttempted(hr, revision, releaseRevision, valuesChecksum) if hasNewState { - hr = v2.HelmReleaseProgressing(hr) - if updateStatusErr := r.patchStatus(ctx, &hr); updateStatusErr != nil { - log.Error(updateStatusErr, "unable to update status after state update") - return hr, updateStatusErr - } - // Record progressing status - r.recordReadiness(ctx, hr) + conditions.MarkUnknown(hr, v2.ReleasedCondition, meta.ProgressingReason, "Reconciliation in progress") + hr.ResetFailureCounter() } // Check status of any previous release attempt. - released := apimeta.FindStatusCondition(hr.Status.Conditions, v2.ReleasedCondition) + released := conditions.Get(hr, v2.ReleasedCondition) if released != nil { switch released.Status { // Succeed if the previous release attempt succeeded. case metav1.ConditionTrue: - return v2.HelmReleaseReady(hr), nil + return ctrl.Result{}, nil case metav1.ConditionFalse: // Fail if the previous release attempt remediation failed. 
- remediated := apimeta.FindStatusCondition(hr.Status.Conditions, v2.RemediatedCondition) + remediated := conditions.Get(hr, v2.RemediatedCondition) if remediated != nil && remediated.Status == metav1.ConditionFalse { err = fmt.Errorf("previous release attempt remediation failed") - return v2.HelmReleaseNotReady(hr, remediated.Reason, remediated.Message), err + return ctrl.Result{}, err } } // Fail if install retries are exhausted. if hr.Spec.GetInstall().GetRemediation().RetriesExhausted(hr) { + hr.IncrementFailureCounter() err = fmt.Errorf("install retries exhausted") - return v2.HelmReleaseNotReady(hr, released.Reason, err.Error()), err + conditions.MarkTrue(hr, v2.RetriesExhaustedCondition, released.Reason, err.Error()) + r.Eventf(ctx, hr, events.EventSeverityError, released.Reason, err.Error()) + return ctrl.Result{}, err } // Fail if there is a release and upgrade retries are exhausted. // This avoids failing after an upgrade uninstall remediation strategy. if rel != nil && hr.Spec.GetUpgrade().GetRemediation().RetriesExhausted(hr) { + hr.IncrementFailureCounter() err = fmt.Errorf("upgrade retries exhausted") - return v2.HelmReleaseNotReady(hr, released.Reason, err.Error()), err + conditions.MarkTrue(hr, v2.RetriesExhaustedCondition, released.Reason, err.Error()) + r.Eventf(ctx, hr, events.EventSeverityError, released.Reason, err.Error()) + return ctrl.Result{}, err } } + conditions.Delete(hr, v2.RetriesExhaustedCondition) // Deploy the release. 
var deployAction v2.DeploymentAction if rel == nil { - r.event(ctx, hr, revision, events.EventSeverityInfo, "Helm install has started") + r.Event(ctx, hr, events.EventSeverityInfo, "Helm install has started", revision) deployAction = hr.Spec.GetInstall() - rel, err = run.Install(hr, chart, values) - err = r.handleHelmActionResult(ctx, &hr, revision, err, deployAction.GetDescription(), + rel, err = run.Install(*hr, chart, values) + err = r.handleHelmActionResult(ctx, hr, revision, err, deployAction.GetDescription(), v2.ReleasedCondition, v2.InstallSucceededReason, v2.InstallFailedReason) } else { - r.event(ctx, hr, revision, events.EventSeverityInfo, "Helm upgrade has started") + r.Event(ctx, hr, events.EventSeverityInfo, "Helm upgrade has started", revision) deployAction = hr.Spec.GetUpgrade() - rel, err = run.Upgrade(hr, chart, values) - err = r.handleHelmActionResult(ctx, &hr, revision, err, deployAction.GetDescription(), + rel, err = run.Upgrade(*hr, chart, values) + err = r.handleHelmActionResult(ctx, hr, revision, err, deployAction.GetDescription(), v2.ReleasedCondition, v2.UpgradeSucceededReason, v2.UpgradeFailedReason) } remediation := deployAction.GetRemediation() @@ -359,40 +411,43 @@ func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, // If there is a new release revision... if util.ReleaseRevision(rel) > releaseRevision { // Ensure release is not marked remediated. - apimeta.RemoveStatusCondition(&hr.Status.Conditions, v2.RemediatedCondition) + conditions.Delete(hr, v2.RemediatedCondition) // If new release revision is successful and tests are enabled, run them. if err == nil && hr.Spec.GetTest().Enable { - _, testErr := run.Test(hr) - testErr = r.handleHelmActionResult(ctx, &hr, revision, testErr, "test", + _, testErr := run.Test(*hr) + testErr = r.handleHelmActionResult(ctx, hr, revision, testErr, "test", v2.TestSuccessCondition, v2.TestSucceededReason, v2.TestFailedReason) // Propagate any test error if not marked ignored. 
if testErr != nil && !remediation.MustIgnoreTestFailures(hr.Spec.GetTest().IgnoreFailures) { - testsPassing := apimeta.FindStatusCondition(hr.Status.Conditions, v2.TestSuccessCondition) - meta.SetResourceCondition(&hr, v2.ReleasedCondition, metav1.ConditionFalse, testsPassing.Reason, testsPassing.Message) - err = testErr + testsPassing := conditions.Get(hr, v2.TestSuccessCondition) + if testsPassing != nil { + hr.IncrementFailureCounter() + conditions.MarkFalse(hr, v2.ReleasedCondition, testsPassing.Reason, testsPassing.Message) + err = testErr + } } } } if err != nil { // Increment failure count for deployment action. - remediation.IncrementFailureCount(&hr) + remediation.IncrementFailureCount(hr) // Remediate deployment failure if necessary. if !remediation.RetriesExhausted(hr) || remediation.MustRemediateLastFailure() { if util.ReleaseRevision(rel) <= releaseRevision { - log.Info(fmt.Sprintf("skipping remediation, no new release revision created")) + log.Info("skipping remediation, no new release revision created") } else { var remediationErr error switch remediation.GetStrategy() { case v2.RollbackRemediationStrategy: - rollbackErr := run.Rollback(hr) - remediationErr = r.handleHelmActionResult(ctx, &hr, revision, rollbackErr, "rollback", + rollbackErr := run.Rollback(*hr) + remediationErr = r.handleHelmActionResult(ctx, hr, revision, rollbackErr, "rollback", v2.RemediatedCondition, v2.RollbackSucceededReason, v2.RollbackFailedReason) case v2.UninstallRemediationStrategy: - uninstallErr := run.Uninstall(hr) - remediationErr = r.handleHelmActionResult(ctx, &hr, revision, uninstallErr, "uninstall", + uninstallErr := run.Uninstall(*hr) + remediationErr = r.handleHelmActionResult(ctx, hr, revision, uninstallErr, "uninstall", v2.RemediatedCondition, v2.UninstallSucceededReason, v2.UninstallFailedReason) } if remediationErr != nil { @@ -401,7 +456,7 @@ func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, } // Determine release after remediation. 
- rel, observeLastReleaseErr = run.ObserveLastRelease(hr) + rel, observeLastReleaseErr = run.ObserveLastRelease(*hr) if observeLastReleaseErr != nil { err = &ConditionError{ Reason: v2.GetLastReleaseFailedReason, @@ -413,40 +468,44 @@ func (r *HelmReleaseReconciler) reconcileRelease(ctx context.Context, hr.Status.LastReleaseRevision = util.ReleaseRevision(rel) + // TODO: This should be replaced as part of issue #324 if err != nil { - reason := meta.ReconciliationFailedReason + // TODO: Is this the correct reason? + reason := v2.InstallFailedReason if condErr := (*ConditionError)(nil); errors.As(err, &condErr) { reason = condErr.Reason } - return v2.HelmReleaseNotReady(hr, reason, err.Error()), err + hr.IncrementFailureCounter() + conditions.MarkFalse(hr, v2.ReleasedCondition, reason, err.Error()) + return ctrl.Result{}, err } - return v2.HelmReleaseReady(hr), nil + + hr.ResetFailureCounter() + return ctrl.Result{}, nil } -func (r *HelmReleaseReconciler) checkDependencies(hr v2.HelmRelease) error { +func (r *HelmReleaseReconciler) checkDependencies(ctx context.Context, hr *v2.HelmRelease) error { for _, d := range hr.Spec.DependsOn { if d.Namespace == "" { d.Namespace = hr.GetNamespace() } - dName := types.NamespacedName(d) + dName := types.NamespacedName{Namespace: d.Namespace, Name: d.Name} var dHr v2.HelmRelease - err := r.Get(context.Background(), dName, &dHr) + err := r.Get(ctx, dName, &dHr) if err != nil { return fmt.Errorf("unable to get '%s' dependency: %w", dName, err) } - if len(dHr.Status.Conditions) == 0 || dHr.Generation != dHr.Status.ObservedGeneration { return fmt.Errorf("dependency '%s' is not ready", dName) } - - if !apimeta.IsStatusConditionTrue(dHr.Status.Conditions, meta.ReadyCondition) { + if !conditions.IsReady(&dHr) { return fmt.Errorf("dependency '%s' is not ready", dName) } } return nil } -func (r *HelmReleaseReconciler) getRESTClientGetter(ctx context.Context, hr v2.HelmRelease) (genericclioptions.RESTClientGetter, error) { +func (r 
*HelmReleaseReconciler) getRESTClientGetter(ctx context.Context, hr *v2.HelmRelease) (genericclioptions.RESTClientGetter, error) { if hr.Spec.KubeConfig == nil { // impersonate service account if specified if hr.Spec.ServiceAccountName != "" { @@ -472,7 +531,7 @@ func (r *HelmReleaseReconciler) getRESTClientGetter(ctx context.Context, hr v2.H } var kubeConfig []byte - for k, _ := range secret.Data { + for k := range secret.Data { if k == "value" || k == "value.yaml" { kubeConfig = secret.Data[k] break @@ -485,7 +544,7 @@ func (r *HelmReleaseReconciler) getRESTClientGetter(ctx context.Context, hr v2.H return kube.NewMemoryRESTClientGetter(kubeConfig, hr.GetReleaseNamespace()), nil } -func (r *HelmReleaseReconciler) getServiceAccountToken(ctx context.Context, hr v2.HelmRelease) (string, error) { +func (r *HelmReleaseReconciler) getServiceAccountToken(ctx context.Context, hr *v2.HelmRelease) (string, error) { namespacedName := types.NamespacedName{ Namespace: hr.Namespace, Name: hr.Spec.ServiceAccountName, @@ -528,7 +587,7 @@ func (r *HelmReleaseReconciler) getServiceAccountToken(ctx context.Context, hr v // composeValues attempts to resolve all v2beta1.ValuesReference resources // and merges them as defined. Referenced resources are only retrieved once // to ensure a single version is taken into account during the merge. -func (r *HelmReleaseReconciler) composeValues(ctx context.Context, hr v2.HelmRelease) (chartutil.Values, error) { +func (r *HelmReleaseReconciler) composeValues(ctx context.Context, hr *v2.HelmRelease) (chartutil.Values, error) { result := chartutil.Values{} configMaps := make(map[string]*corev1.ConfigMap) @@ -641,11 +700,9 @@ func (r *HelmReleaseReconciler) composeValues(ctx context.Context, hr v2.HelmRel // reconcileDelete deletes the v1beta1.HelmChart of the v2beta1.HelmRelease, // and uninstalls the Helm release if the resource has not been suspended. 
-func (r *HelmReleaseReconciler) reconcileDelete(ctx context.Context, hr v2.HelmRelease) (ctrl.Result, error) { - r.recordReadiness(ctx, hr) - +func (r *HelmReleaseReconciler) reconcileDelete(ctx context.Context, hr *v2.HelmRelease) (ctrl.Result, error) { // Delete the HelmChart that belongs to this resource. - if err := r.deleteHelmChart(ctx, &hr); err != nil { + if err := r.deleteHelmChart(ctx, hr); err != nil { return ctrl.Result{}, err } @@ -659,21 +716,16 @@ func (r *HelmReleaseReconciler) reconcileDelete(ctx context.Context, hr v2.HelmR if err != nil { return ctrl.Result{}, err } - if err := run.Uninstall(hr); err != nil && !errors.Is(err, driver.ErrReleaseNotFound) { + if err := run.Uninstall(*hr); err != nil && !errors.Is(err, driver.ErrReleaseNotFound) { return ctrl.Result{}, err } logr.FromContext(ctx).Info("uninstalled Helm release for deleted resource") - } else { logr.FromContext(ctx).Info("skipping Helm uninstall for suspended resource") } - // Remove our finalizer from the list and update it. 
- controllerutil.RemoveFinalizer(&hr, v2.HelmReleaseFinalizer) - if err := r.Update(ctx, &hr); err != nil { - return ctrl.Result{}, err - } - + // Remove our finalizer from the list + controllerutil.RemoveFinalizer(hr, v2.HelmReleaseFinalizer) return ctrl.Result{}, nil } @@ -685,26 +737,17 @@ func (r *HelmReleaseReconciler) handleHelmActionResult(ctx context.Context, if actionErr := (*runner.ActionError)(nil); errors.As(err, &actionErr) { msg = msg + "\n\nLast Helm logs:\n\n" + actionErr.CapturedLogs } - meta.SetResourceCondition(hr, condition, metav1.ConditionFalse, failedReason, msg) - r.event(ctx, *hr, revision, events.EventSeverityError, msg) + hr.IncrementFailureCounter() + conditions.MarkFalse(hr, condition, failedReason, msg) + r.Eventf(ctx, hr, events.EventSeverityError, failedReason, msg) return &ConditionError{Reason: failedReason, Err: err} } else { - msg := fmt.Sprintf("Helm %s succeeded", action) - meta.SetResourceCondition(hr, condition, metav1.ConditionTrue, succeededReason, msg) - r.event(ctx, *hr, revision, events.EventSeverityInfo, msg) + conditions.MarkTrue(hr, condition, succeededReason, "Helm %s succeeded", action) + r.Eventf(ctx, hr, events.EventSeverityInfo, succeededReason, "Helm %s succeeded", action) return nil } } -func (r *HelmReleaseReconciler) patchStatus(ctx context.Context, hr *v2.HelmRelease) error { - key := client.ObjectKeyFromObject(hr) - latest := &v2.HelmRelease{} - if err := r.Client.Get(ctx, key, latest); err != nil { - return err - } - return r.Client.Status().Patch(ctx, hr, client.MergeFrom(latest)) -} - func (r *HelmReleaseReconciler) requestsForHelmChartChange(o client.Object) []reconcile.Request { hc, ok := o.(*sourcev1.HelmChart) if !ok { @@ -734,63 +777,3 @@ func (r *HelmReleaseReconciler) requestsForHelmChartChange(o client.Object) []re } return reqs } - -// event emits a Kubernetes event and forwards the event to notification controller if configured. 
-func (r *HelmReleaseReconciler) event(ctx context.Context, hr v2.HelmRelease, revision, severity, msg string) { - r.EventRecorder.Event(&hr, "Normal", severity, msg) - objRef, err := reference.GetReference(r.Scheme, &hr) - if err != nil { - logr.FromContext(ctx).Error(err, "unable to send event") - return - } - - if r.ExternalEventRecorder != nil { - var meta map[string]string - if revision != "" { - meta = map[string]string{"revision": revision} - } - if err := r.ExternalEventRecorder.Eventf(*objRef, meta, severity, severity, msg); err != nil { - logr.FromContext(ctx).Error(err, "unable to send event") - return - } - } -} - -func (r *HelmReleaseReconciler) recordSuspension(ctx context.Context, hr v2.HelmRelease) { - if r.MetricsRecorder == nil { - return - } - log := logr.FromContext(ctx) - - objRef, err := reference.GetReference(r.Scheme, &hr) - if err != nil { - log.Error(err, "unable to record suspended metric") - return - } - - if !hr.DeletionTimestamp.IsZero() { - r.MetricsRecorder.RecordSuspend(*objRef, false) - } else { - r.MetricsRecorder.RecordSuspend(*objRef, hr.Spec.Suspend) - } -} - -func (r *HelmReleaseReconciler) recordReadiness(ctx context.Context, hr v2.HelmRelease) { - if r.MetricsRecorder == nil { - return - } - - objRef, err := reference.GetReference(r.Scheme, &hr) - if err != nil { - logr.FromContext(ctx).Error(err, "unable to record readiness metric") - return - } - if rc := apimeta.FindStatusCondition(hr.Status.Conditions, meta.ReadyCondition); rc != nil { - r.MetricsRecorder.RecordCondition(*objRef, *rc, !hr.DeletionTimestamp.IsZero()) - } else { - r.MetricsRecorder.RecordCondition(*objRef, metav1.Condition{ - Type: meta.ReadyCondition, - Status: metav1.ConditionUnknown, - }, !hr.DeletionTimestamp.IsZero()) - } -} diff --git a/controllers/helmrelease_controller_chart.go b/controllers/helmrelease_controller_chart.go index 8a15d3f85..9b3f0e5e7 100644 --- a/controllers/helmrelease_controller_chart.go +++ 
b/controllers/helmrelease_controller_chart.go @@ -78,18 +78,6 @@ func (r *HelmReleaseReconciler) reconcileChart(ctx context.Context, hr *v2.HelmR return &helmChart, nil } -// getHelmChart retrieves the v1beta1.HelmChart for the given -// v2beta1.HelmRelease using the name that is advertised in the status -// object. It returns the v1beta1.HelmChart, or an error. -func (r *HelmReleaseReconciler) getHelmChart(ctx context.Context, hr *v2.HelmRelease) (*sourcev1.HelmChart, error) { - namespace, name := hr.Status.GetHelmChart() - hc := &sourcev1.HelmChart{} - if err := r.Client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, hc); err != nil { - return nil, err - } - return hc, nil -} - // loadHelmChart attempts to download the artifact from the provided source, // loads it into a chart.Chart, and removes the downloaded artifact. // It returns the loaded chart.Chart on success, or an error. @@ -174,10 +162,9 @@ func buildHelmChartFromTemplate(hr *v2.HelmRelease) *sourcev1.HelmChart { Name: template.Spec.SourceRef.Name, Kind: template.Spec.SourceRef.Kind, }, - Interval: template.GetInterval(hr.Spec.Interval), - ReconcileStrategy: template.Spec.ReconcileStrategy, - ValuesFiles: template.Spec.ValuesFiles, - ValuesFile: template.Spec.ValuesFile, + Interval: template.GetInterval(hr.Spec.Interval), + ValuesFiles: template.Spec.ValuesFiles, + ValuesFile: template.Spec.ValuesFile, }, } } @@ -200,8 +187,6 @@ func helmChartRequiresUpdate(hr *v2.HelmRelease, chart *sourcev1.HelmChart) bool return true case template.GetInterval(hr.Spec.Interval) != chart.Spec.Interval: return true - case template.Spec.ReconcileStrategy != chart.Spec.ReconcileStrategy: - return true case !reflect.DeepEqual(template.Spec.ValuesFiles, chart.Spec.ValuesFiles): return true case template.Spec.ValuesFile != chart.Spec.ValuesFile: diff --git a/controllers/helmrelease_controller_chart_test.go b/controllers/helmrelease_controller_chart_test.go index 48a62027e..9df18ef85 100644 --- 
a/controllers/helmrelease_controller_chart_test.go +++ b/controllers/helmrelease_controller_chart_test.go @@ -420,13 +420,6 @@ func Test_helmChartRequiresUpdate(t *testing.T) { }, want: true, }, - { - name: "detects reconcile strategy change", - modify: func(hr *v2.HelmRelease, hc *sourcev1.HelmChart) { - hr.Spec.Chart.Spec.ReconcileStrategy = "Revision" - }, - want: true, - }, { name: "detects values files change", modify: func(hr *v2.HelmRelease, hc *sourcev1.HelmChart) { diff --git a/controllers/helmrelease_controller_test.go b/controllers/helmrelease_controller_test.go index d18763086..4f32c8e91 100644 --- a/controllers/helmrelease_controller_test.go +++ b/controllers/helmrelease_controller_test.go @@ -261,7 +261,7 @@ invalid`, v, _ := yaml.YAMLToJSON([]byte(tt.values)) values = &apiextensionsv1.JSON{Raw: v} } - hr := v2.HelmRelease{ + hr := &v2.HelmRelease{ Spec: v2.HelmReleaseSpec{ ValuesFrom: tt.references, Values: values, diff --git a/docs/api/helmrelease.md b/docs/api/helmrelease.md index 121942186..5d3068d62 100644 --- a/docs/api/helmrelease.md +++ b/docs/api/helmrelease.md @@ -166,14 +166,14 @@ Defaults to the namespace of the HelmRelease.
dependsOnDependsOn may contain a dependency.CrossNamespaceDependencyReference slice with +
DependsOn may contain a meta.NamespacedObjectReference slice with references to HelmRelease resources that must be ready before this HelmRelease can be reconciled.
reconcileStrategyDetermines what enables the creation of a new artifact. Valid values are -(‘ChartVersion’, ‘Revision’). -See the documentation of the values for an explanation on their behavior. -Defaults to ChartVersion when omitted.
-valuesFilesreconcileStrategyDetermines what enables the creation of a new artifact. Valid values are -(‘ChartVersion’, ‘Revision’). -See the documentation of the values for an explanation on their behavior. -Defaults to ChartVersion when omitted.
-valuesFilesdependsOnDependsOn may contain a dependency.CrossNamespaceDependencyReference slice with +
DependsOn may contain a meta.NamespacedObjectReference slice with references to HelmRelease resources that must be ready before this HelmRelease can be reconciled.