3 changes: 2 additions & 1 deletion CHANGELOG.md
@@ -2,7 +2,7 @@

### Added

- Added [`run`](./doc/cli/operator-sdk_alpha_run.md) and [`cleanup`](./doc/cli/operator-sdk_alpha_cleanup.md) subcommands (under the `alpha` subcommand) to manage deployment/deletion of operators. These commands currently interact with OLM via an in-cluster registry-server created using an operator's on-disk manifests and managed by `operator-sdk`. ([#2402](ttps://github.com/operator-framework/operator-sdk/pull/2402))
- Added [`run`](./doc/cli/operator-sdk_alpha_run.md) and [`cleanup`](./doc/cli/operator-sdk_alpha_cleanup.md) subcommands (under the `alpha` subcommand) to manage deployment/deletion of operators. These commands currently interact with OLM via an in-cluster registry-server created using an operator's on-disk manifests and managed by `operator-sdk`. ([#2402](https://github.com/operator-framework/operator-sdk/pull/2402))
- Added [`bundle build`](./doc/cli/operator-sdk_alpha_bundle_build.md) (under the `alpha` subcommand) which builds, and optionally generates metadata for, [operator bundle images](https://github.com/openshift/enhancements/blob/ec2cf96/enhancements/olm/operator-registry.md). ([#2076](https://github.com/operator-framework/operator-sdk/pull/2076))

### Changed
@@ -14,6 +14,7 @@

### Bug Fixes

- Fixed a regression in the helm-operator that caused all releases to be deployed in the same namespace that the operator was deployed in, regardless of which namespace the CR was created in. Now release resources are created in the same namespace as the CR. ([#2414](https://github.com/operator-framework/operator-sdk/pull/2414))
- Fix issue when the test-framework would attempt to create a namespace exceeding 63 characters. `pkg/test/NewCtx()` now creates a unique id instead of using the test name. `TestCtx.GetNamespace()` uses this unique id to create a namespace that avoids this scenario. ([#2335](https://github.com/operator-framework/operator-sdk/pull/2335))

## v0.14.0
66 changes: 38 additions & 28 deletions hack/tests/e2e-helm.sh
@@ -10,20 +10,24 @@ ROOTDIR="$(pwd)"
TMPDIR="$(mktemp -d)"
trap_add 'rm -rf $TMPDIR' EXIT

test_namespace="test-e2e-helm"

deploy_operator() {
kubectl create -f "$OPERATORDIR/deploy/service_account.yaml"
kubectl create -f "$OPERATORDIR/deploy/role.yaml"
kubectl create -f "$OPERATORDIR/deploy/role_binding.yaml"
kubectl create -f "$OPERATORDIR/deploy/crds/helm.example.com_nginxes_crd.yaml"
kubectl create -f "$OPERATORDIR/deploy/operator.yaml"
kubectl create -f "$OPERATORDIR/deploy/service_account.yaml"
kubectl create -f "$OPERATORDIR/deploy/cluster_role.yaml"
**Member:** The addition of `--namespace` below makes sense, but why is the test operator now cluster-scoped? Shouldn't its manifests just have a different namespace?

**Member Author:** The change to the operator.yaml is to make it watch all namespaces so that it can see CRs outside of its own namespace. Not sure if that answers your question...

kubectl create -f "$OPERATORDIR/deploy/cluster_role_binding.yaml"
kubectl create -f "$OPERATORDIR/deploy/cluster_operator.yaml"
kubectl create namespace ${test_namespace}
}

remove_operator() {
kubectl delete --ignore-not-found namespace ${test_namespace}
kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/service_account.yaml"
kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/role.yaml"
kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/role_binding.yaml"
kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/cluster_role.yaml"
kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/cluster_role_binding.yaml"
kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/crds/helm.example.com_nginxes_crd.yaml"
kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/operator.yaml"
kubectl delete --ignore-not-found=true -f "$OPERATORDIR/deploy/cluster_operator.yaml"
}

test_operator() {
@@ -46,68 +50,69 @@ test_operator() {
exit 1
fi


# verify that the metrics endpoint exists
if ! timeout 1m bash -c -- "until kubectl run --attach --rm --restart=Never test-metrics --image=$metrics_test_image -- curl -sfo /dev/null http://nginx-operator-metrics:8383/metrics; do sleep 1; done";
if ! timeout 1m bash -c -- "until kubectl run --attach --rm --restart=Never test-metrics --image=${metrics_test_image} -- curl -sfo /dev/null http://nginx-operator-metrics:8383/metrics; do sleep 1; done";
then
echo "Failed to verify that metrics endpoint exists"
kubectl logs deployment/nginx-operator
exit 1
fi

# create CR
kubectl create -f deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml
trap_add 'kubectl delete --ignore-not-found -f ${OPERATORDIR}/deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml' EXIT
if ! timeout 1m bash -c -- 'until kubectl get nginxes.helm.example.com example-nginx -o jsonpath="{..status.deployedRelease.name}" | grep "example-nginx"; do sleep 1; done';
kubectl create --namespace=${test_namespace} -f deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml
trap_add "kubectl delete --namespace=${test_namespace} --ignore-not-found -f ${OPERATORDIR}/deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml" EXIT
if ! timeout 1m bash -c -- "until kubectl get --namespace=${test_namespace} nginxes.helm.example.com example-nginx -o jsonpath='{..status.deployedRelease.name}' | grep 'example-nginx'; do sleep 1; done";
then
kubectl logs deployment/nginx-operator
exit 1
fi

# verify that the custom resource metrics endpoint exists
if ! timeout 1m bash -c -- "until kubectl run --attach --rm --restart=Never test-cr-metrics --image=$metrics_test_image -- curl -sfo /dev/null http://nginx-operator-metrics:8686/metrics; do sleep 1; done";
if ! timeout 1m bash -c -- "until kubectl run --attach --rm --restart=Never test-cr-metrics --image=${metrics_test_image} -- curl -sfo /dev/null http://nginx-operator-metrics:8686/metrics; do sleep 1; done";
then
echo "Failed to verify that custom resource metrics endpoint exists"
kubectl logs deployment/nginx-operator
exit 1
fi

release_name=$(kubectl get nginxes.helm.example.com example-nginx -o jsonpath="{..status.deployedRelease.name}")
nginx_deployment=$(kubectl get deployment -l "app.kubernetes.io/instance=${release_name}" -o jsonpath="{..metadata.name}")
release_name=$(kubectl get --namespace=${test_namespace} nginxes.helm.example.com example-nginx -o jsonpath="{..status.deployedRelease.name}")
nginx_deployment=$(kubectl get --namespace=${test_namespace} deployment -l "app.kubernetes.io/instance=${release_name}" -o jsonpath="{..metadata.name}")

if ! timeout 1m kubectl rollout status deployment/${nginx_deployment};
if ! timeout 1m kubectl rollout --namespace=${test_namespace} status deployment/${nginx_deployment};
then
kubectl describe pods -l "app.kubernetes.io/instance=${release_name}"
kubectl describe deployments ${nginx_deployment}
kubectl describe --namespace=${test_namespace} pods -l "app.kubernetes.io/instance=${release_name}"
kubectl describe --namespace=${test_namespace} deployments ${nginx_deployment}
kubectl logs deployment/nginx-operator
exit 1
fi

nginx_service=$(kubectl get service -l "app.kubernetes.io/instance=${release_name}" -o jsonpath="{..metadata.name}")
kubectl get service ${nginx_service}
nginx_service=$(kubectl get --namespace=${test_namespace} service -l "app.kubernetes.io/instance=${release_name}" -o jsonpath="{..metadata.name}")
kubectl get --namespace=${test_namespace} service ${nginx_service}

# scale deployment replicas to 2 and verify the
# deployment automatically scales back down to 1.
kubectl scale deployment/${nginx_deployment} --replicas=2
if ! timeout 1m bash -c -- "until test \$(kubectl get deployment/${nginx_deployment} -o jsonpath='{..spec.replicas}') -eq 1; do sleep 1; done";
kubectl scale --namespace=${test_namespace} deployment/${nginx_deployment} --replicas=2
if ! timeout 1m bash -c -- "until test \$(kubectl get --namespace=${test_namespace} deployment/${nginx_deployment} -o jsonpath='{..spec.replicas}') -eq 1; do sleep 1; done";
then
kubectl describe pods -l "app.kubernetes.io/instance=${release_name}"
kubectl describe deployments ${nginx_deployment}
kubectl describe --namespace=${test_namespace} pods -l "app.kubernetes.io/instance=${release_name}"
kubectl describe --namespace=${test_namespace} deployments ${nginx_deployment}
kubectl logs deployment/nginx-operator
exit 1
fi

# update CR to replicaCount=2 and verify the deployment
# automatically scales up to 2 replicas.
kubectl patch nginxes.helm.example.com example-nginx -p '[{"op":"replace","path":"/spec/replicaCount","value":2}]' --type=json
if ! timeout 1m bash -c -- "until test \$(kubectl get deployment/${nginx_deployment} -o jsonpath='{..spec.replicas}') -eq 2; do sleep 1; done";
kubectl patch --namespace=${test_namespace} nginxes.helm.example.com example-nginx -p '[{"op":"replace","path":"/spec/replicaCount","value":2}]' --type=json
if ! timeout 1m bash -c -- "until test \$(kubectl get --namespace=${test_namespace} deployment/${nginx_deployment} -o jsonpath='{..spec.replicas}') -eq 2; do sleep 1; done";
then
kubectl describe pods -l "app.kubernetes.io/instance=${release_name}"
kubectl describe deployments ${nginx_deployment}
kubectl describe --namespace=${test_namespace} pods -l "app.kubernetes.io/instance=${release_name}"
kubectl describe --namespace=${test_namespace} deployments ${nginx_deployment}
kubectl logs deployment/nginx-operator
exit 1
fi

kubectl delete -f deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml --wait=true
kubectl delete --namespace=${test_namespace} -f deploy/crds/helm.example.com_v1alpha1_nginx_cr.yaml --wait=true
kubectl logs deployment/nginx-operator | grep "Uninstalled release" | grep "${release_name}"
}

@@ -131,6 +136,11 @@ operator-sdk build "$DEST_IMAGE"
load_image_if_kind "$DEST_IMAGE"
sed -i".bak" -E -e "s|REPLACE_IMAGE|$DEST_IMAGE|g" deploy/operator.yaml; rm -f deploy/operator.yaml.bak
sed -i".bak" -E -e 's|Always|Never|g' deploy/operator.yaml; rm -f deploy/operator.yaml.bak

kubectl create --dry-run -f "deploy/operator.yaml" -o json | jq '((.spec.template.spec.containers[] | select(.name == "nginx-operator").env[]) | select(.name == "WATCH_NAMESPACE")) |= {"name":"WATCH_NAMESPACE", "value":""}' | kubectl create --dry-run -f - -o yaml > deploy/cluster_operator.yaml
kubectl create --dry-run -f "deploy/role.yaml" -o json | jq '.kind = "ClusterRole"' | kubectl create --dry-run -f - -o yaml > deploy/cluster_role.yaml
kubectl create --dry-run -f "deploy/role_binding.yaml" -o json | jq '.subjects[0].namespace= "default"' | jq '.roleRef.kind= "ClusterRole"' | jq '.kind = "ClusterRoleBinding"' | kubectl create --dry-run -f - -o yaml > deploy/cluster_role_binding.yaml

# kind has an issue with certain image registries (ex. redhat's), so use a
# different test pod image.
METRICS_TEST_IMAGE="fedora:latest"
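The generated `cluster_operator.yaml` above empties the `WATCH_NAMESPACE` env var, which is the conventional operator-sdk signal for "watch all namespaces"; this is what lets the test operator see CRs created in `test-e2e-helm` rather than only in its own namespace. Below is a minimal Go sketch of how a typical operator-sdk `main.go` consumes that variable. The scaffold shape and the `manager.Options.Namespace` field are assumptions based on the operator-sdk/controller-runtime versions of this era, not code from this PR:

```go
package main

import (
	"fmt"
	"os"

	"sigs.k8s.io/controller-runtime/pkg/client/config"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

func main() {
	// cluster_operator.yaml sets WATCH_NAMESPACE to "" via the jq edit above.
	ns, found := os.LookupEnv("WATCH_NAMESPACE")
	if !found {
		fmt.Fprintln(os.Stderr, "WATCH_NAMESPACE must be set (it may be empty)")
		os.Exit(1)
	}

	cfg, err := config.GetConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// An empty Namespace makes controller-runtime watch all namespaces,
	// i.e. the operator becomes cluster-scoped, as discussed in the
	// review thread above.
	mgr, err := manager.New(cfg, manager.Options{Namespace: ns})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	_ = mgr // controller registration and mgr.Start() omitted for brevity
}
```
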
37 changes: 26 additions & 11 deletions pkg/helm/client/client.go
@@ -28,25 +28,17 @@ import (
cached "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"sigs.k8s.io/controller-runtime/pkg/manager"
)

// NewFromManager returns a Kubernetes client that can be used with
// a Tiller server.
func NewFromManager(mgr manager.Manager) (*kube.Client, error) {
c, err := NewRESTClientGetter(mgr)
if err != nil {
return nil, err
}
return kube.New(c), nil
}

**@camilamacedo86 (Contributor), Jan 17, 2020:** It was unused before. 👍

var _ genericclioptions.RESTClientGetter = &restClientGetter{}

type restClientGetter struct {
restConfig *rest.Config
discoveryClient discovery.CachedDiscoveryInterface
restMapper meta.RESTMapper
namespaceConfig clientcmd.ClientConfig
}

func (c *restClientGetter) ToRESTConfig() (*rest.Config, error) {
@@ -62,10 +54,32 @@ func (c *restClientGetter) ToRESTMapper() (meta.RESTMapper, error) {
}

func (c *restClientGetter) ToRawKubeConfigLoader() clientcmd.ClientConfig {
return c.namespaceConfig
}

var _ clientcmd.ClientConfig = &namespaceClientConfig{}

type namespaceClientConfig struct {
namespace string
}

func (c namespaceClientConfig) RawConfig() (clientcmdapi.Config, error) {
return clientcmdapi.Config{}, nil
}

func (c namespaceClientConfig) ClientConfig() (*rest.Config, error) {
return nil, nil
**Contributor:** Why is this required, since it always returns nil and nil?

**Member Author:** These functions are required because `ToRawKubeConfigLoader()` needs to return a `clientcmd.ClientConfig`, which is an interface that requires all of these functions to be implemented.

As far as I can tell, Helm only uses `ToRawKubeConfigLoader().Namespace()` and not the other functions (I'll double check this), so it should be okay for those to be no-ops. We could have those unused functions return `fmt.Errorf("not implemented")` if that makes sense.

**Member Author:** Just following up on the question of how `ToRawKubeConfigLoader()` is used. Here's the grep output from the Helm project:

```
$ grep -r ToRaw *
pkg/kube/factory.go:	// ToRawKubeConfigLoader return kubeconfig loader as-is
pkg/kube/factory.go:	ToRawKubeConfigLoader() clientcmd.ClientConfig
pkg/kube/client.go:	if ns, _, err := c.Factory.ToRawKubeConfigLoader().Namespace(); err == nil {
pkg/cli/environment.go:	if ns, _, err := s.RESTClientGetter().ToRawKubeConfigLoader().Namespace(); err == nil {
```

So yeah, only `Namespace()` actually needs to be implemented.

}

func (c namespaceClientConfig) Namespace() (string, bool, error) {
return c.namespace, false, nil
}

func (c namespaceClientConfig) ConfigAccess() clientcmd.ConfigAccess {
return nil
**Contributor:** Same here.

}

func NewRESTClientGetter(mgr manager.Manager) (genericclioptions.RESTClientGetter, error) {
func NewRESTClientGetter(mgr manager.Manager, ns string) (genericclioptions.RESTClientGetter, error) {
cfg := mgr.GetConfig()
dc, err := discovery.NewDiscoveryClientForConfig(cfg)
if err != nil {
@@ -78,6 +92,7 @@ func NewRESTClientGetter(mgr manager.Manager) (genericclioptions.RESTClientGette
restConfig: cfg,
discoveryClient: cdc,
restMapper: rm,
namespaceConfig: &namespaceClientConfig{ns},
}, nil
}

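As the grep output in the review thread shows, Helm only ever calls `ToRawKubeConfigLoader().Namespace()` on a `RESTClientGetter`, which is why `namespaceClientConfig` can stub out everything except `Namespace()`. Here is a sketch of that call path; the `defaultNamespace` helper is hypothetical, and the fallback to `"default"` approximates what Helm's `pkg/kube/client.go` does rather than quoting it:

```go
package example

import (
	"k8s.io/cli-runtime/pkg/genericclioptions"
)

// defaultNamespace resolves the namespace Helm will install into: ask the
// getter's ClientConfig, fall back to "default" on error. With the getter
// returned by NewRESTClientGetter(mgr, ns), this yields ns.
func defaultNamespace(getter genericclioptions.RESTClientGetter) string {
	if ns, _, err := getter.ToRawKubeConfigLoader().Namespace(); err == nil {
		return ns
	}
	return "default"
}
```
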
4 changes: 2 additions & 2 deletions pkg/helm/release/manager_factory.go
@@ -77,11 +77,11 @@ func (f managerFactory) NewManager(cr *unstructured.Unstructured, overrideValues

// Get the necessary clients and client getters. Use a client that injects the CR
// as an owner reference into all resources templated by the chart.
rcg, err := client.NewRESTClientGetter(f.mgr)
rcg, err := client.NewRESTClientGetter(f.mgr, cr.GetNamespace())
if err != nil {
return nil, fmt.Errorf("failed to get REST client getter from manager: %w", err)
}
kubeClient := kube.New(nil)
kubeClient := kube.New(rcg)
ownerRef := metav1.NewControllerRef(cr, cr.GroupVersionKind())
ownerRefClient := client.NewOwnerRefInjectingClient(*kubeClient, *ownerRef)

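Passing the per-CR getter into `kube.New` is the crux of the fix. A sketch of the new wiring with the behavioral difference spelled out in comments; the nil-getter fallback is how Helm v3's `pkg/kube` is understood to behave, an assumption rather than something stated in this diff:

```go
package example

import (
	"fmt"

	"helm.sh/helm/v3/pkg/kube"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/manager"

	"github.com/operator-framework/operator-sdk/pkg/helm/client"
)

// newKubeClientFor builds a Helm kube client scoped to the CR's namespace.
func newKubeClientFor(mgr manager.Manager, cr *unstructured.Unstructured) (*kube.Client, error) {
	rcg, err := client.NewRESTClientGetter(mgr, cr.GetNamespace())
	if err != nil {
		return nil, fmt.Errorf("failed to get REST client getter from manager: %w", err)
	}
	// Before this PR the factory called kube.New(nil); with a nil getter,
	// Helm falls back to a default RESTClientGetter whose Namespace()
	// resolves to the operator pod's own namespace, so every release landed
	// there. With the per-CR getter, Namespace() returns cr.GetNamespace().
	return kube.New(rcg), nil
}
```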