diff --git a/example/kb-memcached-operator/memcached_controller.go.tmpl b/example/kb-memcached-operator/memcached_controller.go.tmpl deleted file mode 100644 index 9ec68405e8..0000000000 --- a/example/kb-memcached-operator/memcached_controller.go.tmpl +++ /dev/null @@ -1,182 +0,0 @@ -/* -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "reflect" - - "github.com/go-logr/logr" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - - cachev1alpha1 "github.com/example-inc/memcached-operator/api/v1alpha1" -) - -// MemcachedReconciler reconciles a Memcached object -type MemcachedReconciler struct { - client.Client - Log logr.Logger - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list; - -func (r *MemcachedReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { - ctx := context.Background() - log := r.Log.WithValues("memcached", 
req.NamespacedName) - - // Fetch the Memcached instance - memcached := &cachev1alpha1.Memcached{} - err := r.Get(ctx, req.NamespacedName, memcached) - if err != nil { - if errors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - log.Info("Memcached resource not found. Ignoring since object must be deleted") - return ctrl.Result{}, nil - } - // Error reading the object - requeue the request. - log.Error(err, "Failed to get Memcached") - return ctrl.Result{}, err - } - - // Check if the deployment already exists, if not create a new one - found := &appsv1.Deployment{} - err = r.Get(ctx, types.NamespacedName{Name: memcached.Name, Namespace: memcached.Namespace}, found) - if err != nil && errors.IsNotFound(err) { - // Define a new deployment - dep := r.deploymentForMemcached(memcached) - log.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) - err = r.Create(ctx, dep) - if err != nil { - log.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) - return ctrl.Result{}, err - } - // Deployment created successfully - return and requeue - return ctrl.Result{Requeue: true}, nil - } else if err != nil { - log.Error(err, "Failed to get Deployment") - return ctrl.Result{}, err - } - - // Ensure the deployment size is the same as the spec - size := memcached.Spec.Size - if *found.Spec.Replicas != size { - found.Spec.Replicas = &size - err = r.Update(ctx, found) - if err != nil { - log.Error(err, "Failed to update Deployment", "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name) - return ctrl.Result{}, err - } - // Spec updated - return and requeue - return ctrl.Result{Requeue: true}, nil - } - - // Update the Memcached status with the pod names - // List the pods for this 
memcached's deployment - podList := &corev1.PodList{} - listOpts := []client.ListOption{ - client.InNamespace(memcached.Namespace), - client.MatchingLabels(labelsForMemcached(memcached.Name)), - } - if err = r.List(ctx, podList, listOpts...); err != nil { - log.Error(err, "Failed to list pods", "Memcached.Namespace", memcached.Namespace, "Memcached.Name", memcached.Name) - return ctrl.Result{}, err - } - podNames := getPodNames(podList.Items) - - // Update status.Nodes if needed - if !reflect.DeepEqual(podNames, memcached.Status.Nodes) { - memcached.Status.Nodes = podNames - err := r.Status().Update(ctx, memcached) - if err != nil { - log.Error(err, "Failed to update Memcached status") - return ctrl.Result{}, err - } - } - - return ctrl.Result{}, nil -} - -// deploymentForMemcached returns a memcached Deployment object -func (r *MemcachedReconciler) deploymentForMemcached(m *cachev1alpha1.Memcached) *appsv1.Deployment { - ls := labelsForMemcached(m.Name) - replicas := m.Spec.Size - - dep := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: m.Name, - Namespace: m.Namespace, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: ls, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: ls, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: "memcached:1.4.36-alpine", - Name: "memcached", - Command: []string{"memcached", "-m=64", "-o", "modern", "-v"}, - Ports: []corev1.ContainerPort{{ - ContainerPort: 11211, - Name: "memcached", - }}, - }}, - }, - }, - }, - } - // Set Memcached instance as the owner and controller - ctrl.SetControllerReference(m, dep, r.Scheme) - return dep -} - -// labelsForMemcached returns the labels for selecting the resources -// belonging to the given memcached CR name. 
-func labelsForMemcached(name string) map[string]string { - return map[string]string{"app": "memcached", "memcached_cr": name} -} - -// getPodNames returns the pod names of the array of pods passed in -func getPodNames(pods []corev1.Pod) []string { - var podNames []string - for _, pod := range pods { - podNames = append(podNames, pod.Name) - } - return podNames -} - -func (r *MemcachedReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&cachev1alpha1.Memcached{}). - Owns(&appsv1.Deployment{}). - Complete(r) -} diff --git a/example/memcached-operator/memcached_controller.go.tmpl b/example/memcached-operator/memcached_controller.go.tmpl index a2e3495962..9ec68405e8 100644 --- a/example/memcached-operator/memcached_controller.go.tmpl +++ b/example/memcached-operator/memcached_controller.go.tmpl @@ -1,141 +1,98 @@ -package memcached +/* +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers import ( "context" "reflect" - cachev1alpha1 "github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1" - + "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/handler" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -var log = logf.Log.WithName("controller_memcached") - -/** -* USER ACTION REQUIRED: This is a scaffold file intended for the user to modify with their own Controller -* business logic. Delete these comments after modifying this file.* - */ - -// Add creates a new Memcached Controller and adds it to the Manager. The Manager will set fields on the Controller -// and Start it when the Manager is Started. 
-func Add(mgr manager.Manager) error { - return add(mgr, newReconciler(mgr)) -} - -// newReconciler returns a new reconcile.Reconciler -func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileMemcached{client: mgr.GetClient(), scheme: mgr.GetScheme()} -} - -// add adds a new Controller to mgr with r as the reconcile.Reconciler -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller - c, err := controller.New("memcached-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to primary resource Memcached - err = c.Watch(&source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) - if err != nil { - return err - } - // TODO(user): Modify this to be the types you create that are owned by the primary resource - // Watch for changes to secondary resource Pods and requeue the owner Memcached - err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - }) - if err != nil { - return err - } + cachev1alpha1 "github.com/example-inc/memcached-operator/api/v1alpha1" +) - return nil +// MemcachedReconciler reconciles a Memcached object +type MemcachedReconciler struct { + client.Client + Log logr.Logger + Scheme *runtime.Scheme } -// blank assignment to verify that ReconcileMemcached implements reconcile.Reconciler -var _ reconcile.Reconciler = &ReconcileMemcached{} - -// ReconcileMemcached reconciles a Memcached object -type ReconcileMemcached struct { - // TODO: Clarify the split client - // This client, initialized using mgr.Client() above, is a split client - // that reads objects from the cache and writes to the apiserver - client client.Client - scheme *runtime.Scheme -} +// +kubebuilder:rbac:groups=cache.example.com,resources=memcacheds,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=cache.example.com,resources=memcacheds/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list; -// Reconcile reads that state of the cluster for a Memcached object and makes changes based on the state read -// and what is in the Memcached.Spec -// TODO(user): Modify this Reconcile function to implement your Controller logic. This example creates -// a Memcached Deployment for each Memcached CR -// Note: -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) - reqLogger.Info("Reconciling Memcached") +func (r *MemcachedReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("memcached", req.NamespacedName) // Fetch the Memcached instance memcached := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO(), request.NamespacedName, memcached) + err := r.Get(ctx, req.NamespacedName, memcached) if err != nil { if errors.IsNotFound(err) { // Request object not found, could have been deleted after reconcile request. // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. // Return and don't requeue - reqLogger.Info("Memcached resource not found. Ignoring since object must be deleted") - return reconcile.Result{}, nil + log.Info("Memcached resource not found. Ignoring since object must be deleted") + return ctrl.Result{}, nil } // Error reading the object - requeue the request. 
- reqLogger.Error(err, "Failed to get Memcached") - return reconcile.Result{}, err + log.Error(err, "Failed to get Memcached") + return ctrl.Result{}, err } // Check if the deployment already exists, if not create a new one found := &appsv1.Deployment{} - err = r.client.Get(context.TODO(), types.NamespacedName{Name: memcached.Name, Namespace: memcached.Namespace}, found) + err = r.Get(ctx, types.NamespacedName{Name: memcached.Name, Namespace: memcached.Namespace}, found) if err != nil && errors.IsNotFound(err) { // Define a new deployment dep := r.deploymentForMemcached(memcached) - reqLogger.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) - err = r.client.Create(context.TODO(), dep) + log.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) + err = r.Create(ctx, dep) if err != nil { - reqLogger.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) - return reconcile.Result{}, err + log.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) + return ctrl.Result{}, err } // Deployment created successfully - return and requeue - return reconcile.Result{Requeue: true}, nil + return ctrl.Result{Requeue: true}, nil } else if err != nil { - reqLogger.Error(err, "Failed to get Deployment") - return reconcile.Result{}, err + log.Error(err, "Failed to get Deployment") + return ctrl.Result{}, err } // Ensure the deployment size is the same as the spec size := memcached.Spec.Size if *found.Spec.Replicas != size { found.Spec.Replicas = &size - err = r.client.Update(context.TODO(), found) + err = r.Update(ctx, found) if err != nil { - reqLogger.Error(err, "Failed to update Deployment", "Deployment.Namespace", found.Namespace, "Deployment.Name", found.Name) - return reconcile.Result{}, err + log.Error(err, "Failed to update Deployment", "Deployment.Namespace", 
found.Namespace, "Deployment.Name", found.Name) + return ctrl.Result{}, err } // Spec updated - return and requeue - return reconcile.Result{Requeue: true}, nil + return ctrl.Result{Requeue: true}, nil } // Update the Memcached status with the pod names @@ -145,27 +102,27 @@ func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Res client.InNamespace(memcached.Namespace), client.MatchingLabels(labelsForMemcached(memcached.Name)), } - if err = r.client.List(context.TODO(), podList, listOpts...); err != nil { - reqLogger.Error(err, "Failed to list pods", "Memcached.Namespace", memcached.Namespace, "Memcached.Name", memcached.Name) - return reconcile.Result{}, err + if err = r.List(ctx, podList, listOpts...); err != nil { + log.Error(err, "Failed to list pods", "Memcached.Namespace", memcached.Namespace, "Memcached.Name", memcached.Name) + return ctrl.Result{}, err } podNames := getPodNames(podList.Items) // Update status.Nodes if needed if !reflect.DeepEqual(podNames, memcached.Status.Nodes) { memcached.Status.Nodes = podNames - err := r.client.Status().Update(context.TODO(), memcached) + err := r.Status().Update(ctx, memcached) if err != nil { - reqLogger.Error(err, "Failed to update Memcached status") - return reconcile.Result{}, err + log.Error(err, "Failed to update Memcached status") + return ctrl.Result{}, err } } - return reconcile.Result{}, nil + return ctrl.Result{}, nil } // deploymentForMemcached returns a memcached Deployment object -func (r *ReconcileMemcached) deploymentForMemcached(m *cachev1alpha1.Memcached) *appsv1.Deployment { +func (r *MemcachedReconciler) deploymentForMemcached(m *cachev1alpha1.Memcached) *appsv1.Deployment { ls := labelsForMemcached(m.Name) replicas := m.Spec.Size @@ -198,7 +155,7 @@ func (r *ReconcileMemcached) deploymentForMemcached(m *cachev1alpha1.Memcached) }, } // Set Memcached instance as the owner and controller - controllerutil.SetControllerReference(m, dep, r.scheme) + 
ctrl.SetControllerReference(m, dep, r.Scheme) return dep } @@ -216,3 +173,10 @@ func getPodNames(pods []corev1.Pod) []string { } return podNames } + +func (r *MemcachedReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&cachev1alpha1.Memcached{}). + Owns(&appsv1.Deployment{}). + Complete(r) +} diff --git a/hack/check-links.sh b/hack/check-links.sh index 6437414456..15a3b87eb8 100755 --- a/hack/check-links.sh +++ b/hack/check-links.sh @@ -6,6 +6,6 @@ source ./hack/lib/common.sh header_text "Building the site and checking links" docker volume create sdk-html -docker run --rm -v "$(pwd):/src" -v sdk-html:/target klakegg/hugo:0.73.0-ext-ubuntu -s website +docker run --rm -v "$(pwd):/src" -v sdk-html:/src/website/public klakegg/hugo:0.73.0-ext-ubuntu -s website docker run --rm -v sdk-html:/target mtlynch/htmlproofer /target --empty-alt-ignore --http-status-ignore 429 --allow_hash_href docker volume rm sdk-html diff --git a/website/content/en/docs/ansible/quickstart.md b/website/content/en/docs/ansible/quickstart.md index 195ba5ea3c..e561e5e150 100644 --- a/website/content/en/docs/ansible/quickstart.md +++ b/website/content/en/docs/ansible/quickstart.md @@ -367,7 +367,7 @@ For more information, refer [cli][addcli] doc. [ansible-runner-http-plugin]:https://github.com/ansible/ansible-runner-http [ansible-runner-tool]: https://ansible-runner.readthedocs.io/en/latest/install.html [ansible-watches]: /docs/ansible/reference/watches -[operator-scope]:../../legacy-common/operator-scope +[operator-scope]:https://v0-19-x.sdk.operatorframework.io/docs/legacy-common/operator-scope/ [layout-doc]:../reference/scaffolding [homebrew-tool]:https://brew.sh/ [install-guide]: /docs/install-operator-sdk @@ -377,4 +377,4 @@ For more information, refer [cli][addcli] doc. 
[kubectl-tool]:https://kubernetes.io/docs/tasks/tools/install-kubectl/ [addcli]: /docs/cli/operator-sdk_add_api -[quickstart-bundle]: /docs/olm-integration/legacy/quickstart-bundle +[quickstart-bundle]:https://v0-19-x.sdk.operatorframework.io/docs/olm-integration/legacy/quickstart-bundle/ diff --git a/website/content/en/docs/contribution-guidelines/testing/travis-build.md b/website/content/en/docs/contribution-guidelines/testing/travis-build.md index bd23ba4af3..077048fca6 100644 --- a/website/content/en/docs/contribution-guidelines/testing/travis-build.md +++ b/website/content/en/docs/contribution-guidelines/testing/travis-build.md @@ -51,18 +51,14 @@ The Go, Ansible, and Helm tests then differ in what tests they run. ### Helm Tests 1. Run [helm e2e tests][helm-e2e]. - 1. Create base helm operator project by running [`hack/image/helm/scaffold-helm-image.go`][helm-base]. - 2. Build base helm operator image. - 3. Create and configure a new helm type nginx-operator. - 4. Create cluster resources. - 5. Wait for operator to be ready. - 6. Create nginx CR and wait for it to be ready. - 7. Scale up the dependent deployment and verify the operator reconciles it back down. - 8. Scale up the CR and verify the dependent deployment scales up accordingly. - 9. Delete nginx CR and verify that finalizer (which writes a message in the operator logs) ran. - 10. Run `operator-sdk migrate` to add go source to the operator (see this [note][deps_mgmt] on dependency management first). - 11. Run `operator-sdk build` to compile the new binary and build a new image. - 12. Re-run steps 4-9 to test the migrated operator. + 1. Build base helm operator image. + 1. Create and configure a new helm type nginx-operator. + 1. Create cluster resources. + 1. Wait for operator to be ready. + 1. Create nginx CR and wait for it to be ready. + 1. Scale up the dependent deployment and verify the operator reconciles it back down. + 1. 
Scale up the CR and verify the dependent deployment scales up accordingly. + 1. Delete nginx CR and verify that finalizer (which writes a message in the operator logs) ran. **NOTE**: All created resources, including the namespace, are deleted using a bash trap when the test finishes @@ -75,5 +71,3 @@ The Go, Ansible, and Helm tests then differ in what tests they run. [ansible-molecule]: https://github.com/operator-framework/operator-sdk/blob/master/hack/tests/e2e-ansible-molecule.sh [ansible-test]: https://github.com/operator-framework/operator-sdk/tree/master/test/ansible [helm-e2e]: https://github.com/operator-framework/operator-sdk/blob/master/hack/tests/e2e-helm.sh -[helm-base]: https://github.com/operator-framework/operator-sdk/blob/master/hack/image/helm/scaffold-helm-image.go -[deps_mgmt]: /docs/golang/legacy/quickstart#a-note-on-dependency-management diff --git a/website/content/en/docs/faq.md b/website/content/en/docs/faq.md index 48e959656f..c365346b87 100644 --- a/website/content/en/docs/faq.md +++ b/website/content/en/docs/faq.md @@ -29,27 +29,6 @@ Never seeing this warning may suggest that your watch or cache is not healthy. I For more information on `kube-apiserver` request timeout options, see the [Kubernetes API Server Command Line Tool Reference][kube-apiserver_options] -## I keep seeing errors like "Failed to create metrics Service", how do I fix this? - -If you run into the following error message: - -``` -time="2019-06-05T12:29:54Z" level=fatal msg="failed to create or get service for metrics: services \"my-operator\" is forbidden: cannot set blockOwnerDeletion if an ownerReference refers to a resource you can't set finalizers on: , " -``` - -Add the following to your `deploy/role.yaml` file to grant the operator permissions to set owner references to the metrics Service resource. This is needed so that the metrics Service will get deleted as soon as you delete the operators Deployment. 
If you are using another way of deploying your operator, have a look at [this guide][gc-metrics] for more information. - -``` -- apiGroups: - - apps - resources: - - deployments/finalizers - resourceNames: - - - verbs: - - "update" -``` - ## My Ansible module is missing a dependency. How do I add it to the image? Unfortunately, adding the entire dependency tree for all Ansible modules would be excessive. Fortunately, you can add it easily. Simply edit your build/Dockerfile. You'll want to change to root for the install command, just be sure to swap back using a series of commands like the following right after the `FROM` line. @@ -66,20 +45,20 @@ If you aren't sure what dependencies are required, start up a container using th ## I keep seeing errors like "Failed to watch", how do I fix this? -If you run into the following error message: +If you run into the following error message, it means that your operator is unable to watch the resoruce: ``` E0320 15:42:17.676888 1 reflector.go:280] pkg/mod/k8s.io/client-go@v0.0.0-20191016111102-bec269661e48/tools/cache/reflector.go:96: Failed to watch *v1.ImageStreamTag: unknown (get imagestreamtags.image.openshift.io) {"level":"info","ts":1584718937.766342,"logger":"controller_memcached","msg":"ImageStreamTag resource not found. ``` -Then, it means that your Operator is unable to watch the resource. This scenario can be faced because the Operator does not have the permission [(RBAC)[rbac]] to `Watch` the resource, or may be the Schema from the API used, did not implement this verb. In this way the solution would be to grant the permission in the `role.yaml` , or when it is not possible, use the [client.Reader][client.Reader] instead of the client provided. +Using controller-runtime's split client means that read operations (gets and lists) are read from a cache, and write operations are written directly to the API server. 
To populate the cache for reads, controller-runtime initiates a `list` and then a `watch` even when your operator is only attempting to `get` a single resource. The above scenario occurs when the operator does not have an (RBAC)[rbac] permission to `watch` the resource. The solution is to grant permission in the `config/rbac/role.yaml` file. -The client provided will work with a cache, and because of this, the `WATCH` verb is required. +In rare cases, it also could be that the particular resource does not implement the `watch` verb. In this case, it is necessary to use the [client.Reader][client.Reader] instead of the default split client. The manager's `GetAPIReader()` function can be used to get this reader. **Example** -Following are the changes in the `conttroler.go`, to address the need to get the resource via the [client.Reader][client.Reader]. See: +Here is an example that demonstrates how to use a `client.Reader` when a resource does not implement the `watch` verb: ```go @@ -123,30 +102,9 @@ func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Res } ``` -## I see deepcopy errors and image build fails. How do I fix this? - -When you run the ```operator-sdk generate k8s``` command, you might see an error like this - -``` -INFO[0000] Running deepcopy code-generation for Custom Resource group versions: [cache:[v1alpha1], ] -F0523 01:18:27.122034 5157 deepcopy.go:885] Hit an unsupported type invalid type for invalid type, from github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1.Memcached -``` - -This is because of the `GOROOT` environment variable not being set. More details [here][goroot-github-issue]. - -In order to fix this, you simply need to export the `GOROOT` environment variable - -``` -$ export GOROOT=$(go env GOROOT) -``` - -This will work for the current environment. To persist this fix, add the above line to your environment's config file, ex. `bashrc` file. 
- [kube-apiserver_options]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/#options [controller-runtime_faq]: https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md#q-how-do-i-have-different-logic-in-my-reconciler-for-different-types-of-events-eg-create-update-delete [finalizer]:/docs/golang/advanced-topics/#handle-cleanup-on-deletion -[gc-metrics]:/docs/golang/legacy/monitoring/prometheus/#garbage-collection [cr-faq]:https://github.com/kubernetes-sigs/controller-runtime/blob/master/FAQ.md [client.Reader]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#Reader [rbac]:https://kubernetes.io/docs/reference/access-authn-authz/rbac/ -[goroot-github-issue]:https://github.com/operator-framework/operator-sdk/issues/1854#issuecomment-525132306 diff --git a/website/content/en/docs/golang/legacy/_index.md b/website/content/en/docs/golang/legacy/_index.md deleted file mode 100644 index 7703f71e51..0000000000 --- a/website/content/en/docs/golang/legacy/_index.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -title: Legacy CLI and Project Layout -weight: 300 ---- diff --git a/website/content/en/docs/golang/legacy/migrating-existing-apis.md b/website/content/en/docs/golang/legacy/migrating-existing-apis.md deleted file mode 100644 index 51145367e3..0000000000 --- a/website/content/en/docs/golang/legacy/migrating-existing-apis.md +++ /dev/null @@ -1,405 +0,0 @@ ---- -title: Migrating Existing Kubernetes APIs -linkTitle: Migrating Existing APIs -weight: 20 ---- - -Kubernetes APIs are assumed to evolve over time, hence the well-defined API [versioning scheme][k8s-versioning]. Upgrading your operator's APIs can be a non-trivial task, one that will involve changing quite a few source files and manifests. This document aims to identify the complexities of migrating an operator project's API using examples from existing operators. 
- -While examples in this guide follow particular types of API migrations, most of the documented migration steps can be generalized to all migration types. - -## Upgrading one Kind to a new Version from a Version with multiple Kinds - -**Scenario:** your Go operator test-operator has one API version `v1` for group `operators.example.com`. You would like to migrate (upgrade) one kind `CatalogSourceConfig` to `v2` while keeping the other `v1` kind `OperatorGroup` in `v1`. These kinds will remain in group `operators.example.com`. Your project structure looks like the following: - -```console -$ tree pkg/apis -pkg/apis/ -├── addtoscheme_operators_v1.go -├── apis.go -└── operators - └── v1 - ├── catalogsourceconfig_types.go - ├── catalogsourceconfig.go - ├── doc.go - ├── operatorgroup_types.go - ├── operatorgroup.go - ├── phase.go - ├── phase_types.go - ├── register.go - ├── shared.go - ├── zz_generated.deepcopy.go -``` - -Relevant files: - -- `catalogsourceconfig_types.go` and `catalogsourceconfig.go` contain types and functions used by API kind type `CatalogSourceConfig`. -- `operatorgroup_types.go` and `operatorgroup.go` contain types and functions used by API kind type `OperatorGroup`. -- `phase_types.go` and `phase.go` contain types and functions used by *non-API* type `Phase`, which is used by both `CatalogSourceConfig` and `OperatorGroup` types. -- `shared.go` contain types and functions used by both `CatalogSourceConfig` and `OperatorGroup` types. - -#### Questions to ask yourself -1. **Scope:** what files, Go source and YAML, must I modify when migrating? -1. **Shared code:** do I have shared types and functions between `CatalogSourceConfig` and `OperatorGroup`? How do I want shared code refactored? -1. **Imports:** which packages import those I am migrating? How do I modify these packages to import `v2` and new shared package(s)? -1. 
**Backwards-compatibility:** do I want to remove code being migrated from `v1` entirely, forcing the use of `v2`, or support both `v1` and `v2` going forward? - ---- - -### Creating a new API Version - -Creating the new version `v2` is the first step in upgrading your kind `CatalogSourceConfig`. Use the `operator-sdk` to do so by running the following command: - -```console -$ operator-sdk add api --api-version operators.example.com/v2 --kind CatalogSourceConfig -``` - -This command creates a new API version `v2` under group `operators`: - -```console -$ tree pkg/apis -pkg/apis/ -├── addtoscheme_operators_v1.go -├── addtoscheme_operators_v2.go # new addtoscheme source file for v2 -├── apis.go -└── operators - └── v1 - | ├── catalogsourceconfig_types.go - | ├── catalogsourceconfig.go - | ├── doc.go - | ├── operatorgroup_types.go - | ├── operatorgroup.go - | ├── phase.go - | ├── phase_types.go - | ├── register.go - | ├── shared.go - | ├── zz_generated.deepcopy.go - └── v2 # new version dir with source files for v2 - ├── catalogsourceconfig_types.go - ├── doc.go - ├── register.go - ├── zz_generated.deepcopy.go -``` - -In addition to creating a new API version, the command creates an `addtoscheme_operators_v2.go` file that exposes an `AddToScheme()` function for registering `v2.CatalogSourceConfig` and `v2.CatalogSourceConfigList`. - -### Copying shared type definitions and functions to a separate package - -Now that the `v2` package and related files exist, we can begin moving types and functions around. First, we must copy anything shared between `CatalogSourceConfig` and `OperatorGroup` to a separate package that can be imported by `v1`, `v2`, and future versions. We've identified the files containing these types above: `phase.go`, `phase_types.go`, and `shared.go`. 
- -#### Creating a new `shared` package - -Lets create a new package `shared` at `pkg/apis/operators/shared` for these files: - -```console -$ pwd -/home/user/projects/test-operator -$ mkdir pkg/apis/operators/shared -``` - -This package is not a typical API because it contains types only to be used as parts of larger schema, and therefore should not be created with `operator-sdk add api`. It should contain a `doc.go` file with some package-level documentation and annotations: - -```console -$ cat > pkg/apis/operators/shared/doc.go < `package shared`. -- Moving and exporting currently unexported (private) types, their methods, and functions used by `v1` types to `pkg/apis/operators/internal/shared/shared.go`. Exported them in an internal shared package will keep them private while allowing functions and types in `shared` to use them. - -Additionally, `deepcopy-gen` must be run on the `shared` package to generate `DeepCopy()` and `DeepCopyInto()` methods, which are necessary for all Kubernetes API types. To do so, run the following command: - -```console -$ operator-sdk generate k8s -``` - -Now that shared types and functions have their own package we can update any package that imports those types from `v1` to use `shared`. The `CatalogSourceConfig` controller source file `pkg/controller/catalogsourceconfig/catalogsourceconfig_controller.go` imports and uses a type defined in `v1`, `PhaseRunning`, in its `Reconcile()` method. `PhaseRunning` should be imported from `shared` as follows: - -```Go -import ( - "context" - - operatorsv1 "github.com/test-org/test-operator/pkg/apis/operators/v1" - // New import - "github.com/test-org/test-operator/pkg/apis/operators/shared" - - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -... - -func (r *ReconcileCatalogSourceConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) { - ... 
- - config := &operatorsv1.CatalogSourceConfig{} - err := r.client.Get(context.TODO(), request.NamespacedName, config) - if err != nil { - ... - } - // Old - if config.Status.CurrentPhase.Phase.Name != operatorsv1.PhaseRunning { - ... - } - // New - if config.Status.CurrentPhase.Phase.Name != shared.PhaseRunning { - ... - } -} -``` - -Do this for all instances of types previously in `v1` that are now in `shared`. - -Following Kubernetes API version upgrade conventions, code moved to `shared` from `v1` should be marked with "Deprecated" comments in `v1` instead of being removed. While leaving these types in `v1` duplicates code, it allows backwards compatibility for API users; deprecation comments direct users to switch to `v2` and `shared` types. - -Alternatively, types and functions migrated to `shared` can be removed in `v1` to de-duplicate code. This breaks backwards compatibility because projects relying on exported types previously in `v1`, now in `shared`, will be forced to update their imports to use `shared` when upgrading VCS versions. If following this upgrade path, note that updating package import paths in your project will likely be the most pervasive change lines-of-code-wise in this process. Luckily the Go compiler will tell you which import paths you have missed once `CatalogSourceConfig` types are removed from `v1`! - -If any functions or types were moved to `pkg/apis/operators/internal/shared`, remove them from files in `pkg/apis/operators/shared` and import them into `shared` from the internal package. - -### Updating empty `v2` types using `v1` types - -The `CatalogSourceConfig` type and schema code were generated by `operator-sdk add api`, but the types are not populated. We need to copy existing type data from `v1` to `v2`. This process is similar to migrating shared code, except we do not need to export any types or functions. 
- -Remove `pkg/apis/operators/v2/catalogsourceconfig_types.go` and copy `catalogsourceconfig.go` and `catalogsourceconfig_types.go` from `pkg/apis/operators/v1` to `pkg/apis/operators/v2`: - -```console -$ rm pkg/apis/operators/v2/catalogsourceconfig_types.go -$ cp pkg/apis/operators/v1/catalogsourceconfig*.go pkg/apis/operators/v2 -``` - -If you have any comments or custom code in `pkg/apis/operators/v1` related to source code in either copied file, ensure that is copied to `doc.go` or `register.go` in `pkg/apis/operators/v2`. - -You can now run `operator-sdk generate k8s` to generate deepcopy code for the migrated `v2` types. Once this is done, update all packages that import the migrated `v1` types to use those in `v2`. - -### Updating CustomResourceDefinition manifests and generating OpenAPI code - -Now that we've migrated all Go types to their destination packages, we must update the corresponding CustomResourceDefinition (CRD) manifests in `deploy/crds`. - -Doing so can be as simple as running the following command: - -```console -$ operator-sdk generate crds -``` - -This command will automatically update all CRD manifests. - -#### CRD Versioning - -Kubernetes 1.11+ supports CRD [`spec.versions`][crd-versions] and `spec.version` is [deprecated][crd-version-deprecated] as of Kubernetes 1.12. SDK versions `v0.10.x` and below leverage [`controller-tools@v0.1.x`][controller-tools]' CRD generator which generates a now-deprecated `spec.version` value based on the version contained in an API's import path. Names of CRD manifest files generated by those SDK versions contain the `spec.version`, i.e. one CRD manifest is created *per version in a group* with the file name format `___crd.yaml`. SDK versions `v0.11+` use `controller-tools@v0.2.x`, which generates `spec.versions` but not `spec.version` by default, and use the file name format `__crd.yaml`. - -**Notes:** -- `` is the full group name of your CRD while `` is the last subdomain of ``, ex. 
`foo.bar.com` vs `foo`. `` is the plural lower-case of CRD `Kind` specified at `spec.names.plural`. -- Your CRD *must* specify exactly one [storage version][crd-storage-version]. Use the `+kubebuilder:storageversion` [marker][crd-markers] to indicate the GVK that should be used to store data by the API server. This marker should be in a comment above your `CatalogSourceConfig` type. -- If your operator does not have custom data manually added to its CRD's, you can skip to the [following section](#migration-types-and-commonalities-between-them); `operator-sdk generate crds` will handle CRD updates in that case. - -Upgrading from `spec.version` to `spec.versions` will be demonstrated using the following CRD manifest example: - -`deploy/crds/operators_v1_catalogsourceconfig_crd.yaml`: -```yaml -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: catalogsourceconfigs.operators.coreos.com -spec: - group: operators.coreos.com - names: - kind: CatalogSourceConfig - listKind: CatalogSourceConfigList - plural: catalogsourceconfigs - singular: catalogsourceconfig - scope: Namespaced - validation: - openAPIV3Schema: - properties: - apiVersion: - type: string - kind: - type: string - metadata: - type: object - spec: - properties: - size: - format: int32 - type: integer - test: - type: string - required: - - size - type: object - status: - properties: - nodes: - items: - type: string - type: array - required: - - nodes - type: object - version: v1 - subresources: - status: {} -``` - -Steps to upgrade the above CRD: - -1. Rename your CRD manifest file from `deploy/crds/operators_v1_catalogsourceconfig_crd.yaml` to `deploy/crds/operators.coreos.com_catalogsourceconfigs_crd.yaml` - - ```console - $ mv deploy/crds/cache_v1alpha1_memcached_crd.yaml deploy/crds/operators.coreos.com_catalogsourceconfigs_crd.yaml - ``` - -1. 
Create a `spec.versions` list that contains two elements for each version that now exists (`v1` and `v2`): - - ```yaml - spec: - ... - # version is now v2, as it must match the first element in versions. - version: v2 - versions: - - name: v2 - # Set to true for this CRD version to be enabled in-cluster. - served: true - # Exactly one CRD version should be a storage version. - storage: true - - name: v1 - served: true - storage: false - ``` - - The first version in `spec.versions` *must* match that in `spec.version` if `spec.version` exists in the manifest. - -1. *Optional:* `spec.versions` elements have a `schema` field that holds a version-specific OpenAPIV3 validation block to override the global `spec.validation` block. `spec.validation` will be used by the API server to validate one or more versions in `spec.versions` that do not have a `schema` block. If all versions have the same schema, leave `spec.validation` as-is and skip to the [following section](#migration-types-and-commonalities-between-them). If your CRD versions differ in scheme, copy `spec.validation` YAML to the `schema` field in each `spec.versions` element, then modify as needed: - - ```yaml - spec: - ... - version: v2 - versions: - - name: v2 - served: true - storage: true - schema: # v2-specific OpenAPIV3 validation block. - openAPIV3Schema: - properties: - apiVersion: - type: string - ... - - name: v1 - served: true - storage: false - schema: # v1-specific OpenAPIV3 validation block. - openAPIV3Schema: - properties: - apiVersion: - type: string - ... - ``` - - The API server will validate each version by its own `schema` if the global `spec.validation` block is removed. No validation will be performed if a `schema` does not exist for a version and `spec.validation` does not exist. - - If the CRD targets a Kubernetes 1.13+ cluster with the `CustomResourceWebhookConversion` feature enabled, converting between multiple versions can be done using a [conversion][crd-conv]. 
The `None` conversion is simple and useful when the CRD spec has not changed; it only updates the `apiVersion` field of custom resources: - - ```yaml - spec: - ... - conversion: - strategy: None - ``` - - More complex conversions can be done using [conversion webhooks][crd-conv-webhook]. - - _TODO:_ document adding and using conversion webhooks to migrate `v1` to `v2` once the SDK `controller-runtime` version is bumped to `v0.2.0`. - - **Note:** read the [CRD versioning][crd-versions] docs for detailed CRD information, notes on conversion webhooks, and CRD versioning case studies. - -1. *Optional:* `spec.versions` elements have a `subresources` field that holds CR subresource information to override the global `spec.subresources` block. `spec.subresources` will be used by the API server to assess subresource requirements of any version in `spec.versions` that does not have a `subresources` block. If all versions have the same requirements, leave `spec.subresources` as-is and skip to the [following section](#migration-types-and-commonalities-between-them). If CRD versions differ in subresource requirements, add a `subresources` section in each `spec.versions` entry with differing requirements and add each subresource's spec and status as needed: - - ```yaml - spec: - ... - version: v2 - versions: - - name: v2 - served: true - storage: true - subresources: - ... - - name: v1 - served: true - storage: false - subresources: - ... - ``` - - Remove the global `spec.subresources` block if all versions have different subresource requirements. - -1. *Optional:* remove `spec.version`, as it is deprecated in favor of `spec.versions`. - -### Migration Types and Commonalities between them - -This version upgrade walkthrough demonstrates only one of several possible migration scenarios: - -- Group migration, ex. moving an API from group `operators.example.com/v1` to `new-group.example.com/v1alpha1`. -- Kind change, ex. `CatalogSourceConfig` to `CatalogSourceConfigurer`. 
-- Some combination of group, version, and kind migration. - -Each case is different; one may require many more changes than others. However, there are several themes common to all: - -1. Using `operator-sdk add api` to create the necessary directory structure and files used in migration. - - Group migration using the same version, for each kind in the old group `operators.example.com` you want to migrate: - - ```console - $ operator-sdk add api --api-version new-group.example.com/v1 --kind YourKind - ``` - - - Kind migration, using the same group and version as `CatalogSourceConfig`: - - ```console - $ operator-sdk add api --api-version operators.example.com/v1 --kind CatalogSourceConfigurer - ``` - -1. Copying code from one Go package to another, ex. from `v1` to `v2` and `shared`. -1. Changing import paths in project Go source files to those of new packages. -1. Updating CRD manifests. - - In many cases, having sufficient [code annotations][kubebuilder-api-annotations] and running `operator-sdk generate crds` will be enough. - -The Go toolchain can be your friend here too. Running `go vet ./...` can tell you what import paths require changing and what type instantiations are using fields incorrectly. 
- -[k8s-versioning]:https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-versioning -[deepcopy-gen]:https://godoc.org/k8s.io/gengo/examples/deepcopy-gen -[client-gen]:https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/generating-clientset.md -[crd-storage-version]:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#writing-reading-and-updating-versioned-customresourcedefinition-objects -[crd-markers]:https://book.kubebuilder.io/reference/markers/crd.html -[controller-tools]:https://github.com/kubernetes-sigs/controller-tools -[crd-versions]:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/ -[crd-conv]:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#webhook-conversion -[crd-conv-webhook]:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#configure-customresourcedefinition-to-use-conversion-webhooks -[kubebuilder-api-annotations]:https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html -[crd-version-deprecated]:https://github.com/kubernetes/apiextensions-apiserver/commit/d1c6536f26319513417b12245c6e3aee5ca005ca diff --git a/website/content/en/docs/golang/legacy/monitoring/_index.md b/website/content/en/docs/golang/legacy/monitoring/_index.md deleted file mode 100644 index ab77cdf61e..0000000000 --- a/website/content/en/docs/golang/legacy/monitoring/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Golang Based Operator Monitoring -linkTitle: Monitoring -weight: 100 ---- \ No newline at end of file diff --git a/website/content/en/docs/golang/legacy/monitoring/prometheus.md b/website/content/en/docs/golang/legacy/monitoring/prometheus.md deleted file mode 100644 index 7d46de4e7c..0000000000 --- a/website/content/en/docs/golang/legacy/monitoring/prometheus.md +++ /dev/null @@ -1,97 
+0,0 @@ ---- -title: Operator SDK monitoring with Prometheus -linkTitle: With Prometheus -weight: 1 ---- - -[Prometheus][prometheus] is an open-source systems monitoring and alerting toolkit. Below is the overview of the different helpers that exist in Operator SDK to help setup metrics in the generated operator. - -## Metrics in Operator SDK - -### General metrics - -The `CreateMetricsService(ctx context.Context, cfg *rest.Config, servicePorts []v1.ServicePort) (*v1.Service, error)` function exposes general metrics about the running program. These metrics are inherited from controller-runtime. To understand which metrics are exposed, read the metrics package doc of [controller-runtime][controller-metrics]. The function creates a [Service][service] object with the metrics port exposed, which can then be accessed by Prometheus. The Service object is [garbage collected][gc] when the leader pod's root owner is deleted. - -By default, the metrics are served on `0.0.0.0:8383/metrics`. To modify the port the metrics are exposed on, change the `var metricsPort int32 = 8383` variable in the `cmd/manager/main.go` file of the generated operator. - -#### Usage: - -```go - import( - "context" - - "github.com/operator-framework/operator-sdk/pkg/metrics" - "sigs.k8s.io/controller-runtime/pkg/manager" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - ) - - func main() { - - ... - - // Change the below variables to serve metrics on different host or port. - var metricsHost = "0.0.0.0" - var metricsPort int32 = 8383 - - // Pass metrics address to controller-runtime manager - mgr, err := manager.New(cfg, manager.Options{ - Namespace: namespace, - MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), - }) - - ... - - // Add to the below struct any other metrics ports you want to expose. 
- servicePorts := []v1.ServicePort{ - {Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}}, - } - - // Create Service object to expose the metrics port. - _, err = metrics.CreateMetricsService(context.TODO(), cfg, servicePorts) - if err != nil { - // handle error - } - - ... - - } -``` - -*Note:* The above example is already present in `cmd/manager/main.go` in all the operators generated with Operator SDK from v0.5.0 onwards. - -#### Garbage collection - -The metrics Service is [garbage collected][gc] when the resource used to deploy the operator is deleted (e.g. `Deployment`). This resource is determined when the metrics Service is created, at that time the resource owner reference is added to the Service. - -In Kubernetes clusters where [OwnerReferencesPermissionEnforcement][ownerref-permission] is enabled (on by default in all OpenShift clusters), the role requires a `/finalizers` rule to be added. By default when creating the operator with the Operator SDK, this is done automatically under the assumption that the `Deployment` object was used to create the operator pods. In case another method of deploying the operator is used, replace the `- deployments/finalizers` in the `deploy/role.yaml` file. Example rule from `deploy/role.yaml` file for deploying operator with a `StatefulSet`: - -```yaml -... -- apiGroups: - - apps - resourceNames: - - - resources: - - statefulsets/finalizers - verbs: - - update -... -``` - -### Custom resource specific metrics - -By default operator will expose info metrics based on the number of the current instances of an operator's custom resources in the cluster. It leverages [kube-state-metrics][ksm] as a library to generate those metrics. Metrics initialization lives in the `cmd/manager/main.go` file of the operator in the `serveCRMetrics` function. Its arguments are a custom resource's group, version, and kind to generate the metrics. 
The metrics are served on `0.0.0.0:8686/metrics` by default. To modify the exposed metrics port number, change the `operatorMetricsPort` variable at the top of the `cmd/manager/main.go` file in the generated operator. - -### Expose custom metrics - -The operator uses [Prometheus][prometheus] to expose a number of metrics by default. In order to expose custom metrics they have to be registered with the `Registry` object. An example can be found in the [kubebuilder book][kubebuilder]. - - -[kubebuilder]: https://book.kubebuilder.io/reference/metrics.html -[prometheus]: https://prometheus.io/ -[service]: https://kubernetes.io/docs/concepts/services-networking/service/ -[gc]: https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents -[ownerref-permission]: https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement -[ksm]: https://github.com/kubernetes/kube-state-metrics -[controller-metrics]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/internal/controller/metrics diff --git a/website/content/en/docs/golang/legacy/monitoring/service-monitor.md b/website/content/en/docs/golang/legacy/monitoring/service-monitor.md deleted file mode 100644 index 92eee08336..0000000000 --- a/website/content/en/docs/golang/legacy/monitoring/service-monitor.md +++ /dev/null @@ -1,48 +0,0 @@ ---- -title: Using the Prometheus Operator ServiceMonitor CRD with Operator SDK -linkTitle: Using Prometheus Operator ServiceMonitor CRD -weight: 2 ---- - -[`prometheus-operator`][prom-operator] is an operator that creates, configures, and manages Prometheus clusters atop Kubernetes. - -`ServiceMonitor` is a CustomResource of the prometheus-operator, which discovers the `Endpoints` in `Service` objects and configures Prometheus to monitor those pods. See the prometheus-operator [documentation][service-monitor] to learn more about `ServiceMonitor`. 
- -The `CreateServiceMonitors` function takes `Service` objects and generates `ServiceMonitor` resources based on the endpoints. To add `Service` target discovery of your created monitoring `Service` you can use the `metrics.CreateServiceMonitors()` helper function, which accepts the newly created `Service`. - -## Prerequisites: - -- [prometheus-operator][prom-quickstart] needs to be deployed in the cluster. - -## Usage example: - -```go - import( - "k8s.io/api/core/v1" - "github.com/operator-framework/operator-sdk/pkg/metrics" - ) - - func main() { - - ... - - // Populate below with the Service(s) for which you want to create ServiceMonitors. - services := []*v1.Service{} - - // Create one `ServiceMonitor` per application per namespace. - // Change below value to name of the Namespace you want the `ServiceMonitor` to be created in. - ns := "default" - - // Pass the Service(s) to the helper function, which in turn returns the array of `ServiceMonitor` objects. - serviceMonitors, err := metrics.CreateServiceMonitors(restConfig, ns, services) - if err != nil { - // handle error here - } - - ... - } -``` - -[prom-operator]: https://github.com/coreos/prometheus-operator -[service-monitor]: https://github.com/coreos/prometheus-operator/blob/7a25bf6b6bb2347dacb235659b73bc210117acc7/Documentation/design.md#servicemonitor -[prom-quickstart]: https://github.com/coreos/prometheus-operator/tree/master/contrib/kube-prometheus#quickstart diff --git a/website/content/en/docs/golang/legacy/quickstart.md b/website/content/en/docs/golang/legacy/quickstart.md deleted file mode 100644 index 7471d51f5b..0000000000 --- a/website/content/en/docs/golang/legacy/quickstart.md +++ /dev/null @@ -1,870 +0,0 @@ ---- -title: Golang Based Operator Quickstart (Legacy) -linkTitle: Quickstart -weight: 2 ---- - -**Note:** This guide is for the legacy CLI and project layout. See the [new docs][new_docs] for the [Kubebuilder aligned CLI][new_CLI] and project layout. 
- -This guide walks through an example of building a simple memcached-operator using the operator-sdk CLI tool and controller-runtime library API. - -## Create a new project - -Use the CLI to create a new memcached-operator project: - -```sh -$ mkdir -p $HOME/projects -$ cd $HOME/projects -$ operator-sdk new memcached-operator --repo=github.com/example-inc/memcached-operator -$ cd memcached-operator -``` - -To learn about the project directory structure, see [project layout][layout_doc] doc. - -#### A note on dependency management - -`operator-sdk new` generates a `go.mod` file to be used with [Go modules][go_mod_wiki]. The `--repo=` flag is required when creating a project outside of `$GOPATH/src`, as scaffolded files require a valid module path. Ensure you activate module support before using the SDK. From the [Go modules Wiki][go_mod_wiki]: - -> You can activate module support in one of two ways: -> - Invoke the go command in a directory with a valid go.mod file in the current directory or any parent of it and the environment variable GO111MODULE unset (or explicitly set to auto). -> - Invoke the go command with GO111MODULE=on environment variable set. - -##### Vendoring - -By default `--vendor=false`, so an operator's dependencies are downloaded and cached in the Go modules cache. Calls to `go {build,clean,get,install,list,run,test}` by `operator-sdk` subcommands will use an external modules directory. Execute `go help modules` for more information. - -The Operator SDK can create a [`vendor`][go_vendoring] directory for Go dependencies if the project is initialized with `--vendor=true`. - -#### Operator scope - -Read the [operator scope][operator_scope] documentation on how to run your operator as namespace-scoped vs cluster-scoped. - -### Manager -The main program for the operator `cmd/manager/main.go` initializes and runs the [Manager][manager_go_doc]. 
- -The Manager will automatically register the scheme for all custom resources defined under `pkg/apis/...` and run all controllers under `pkg/controller/...`. - -The Manager can restrict the namespace that all controllers will watch for resources: -```Go -mgr, err := manager.New(cfg, manager.Options{Namespace: namespace}) -``` -By default this will be the namespace that the operator is running in. To watch all namespaces leave the namespace option empty: -```Go -mgr, err := manager.New(cfg, manager.Options{Namespace: ""}) -``` - -It is also possible to use the [MultiNamespacedCacheBuilder][multi-namespaced-cache-builder] to watch a specific set of namespaces: -```Go -var namespaces []string // List of Namespaces -// Create a new Cmd to provide shared dependencies and start components -mgr, err := manager.New(cfg, manager.Options{ - NewCache: cache.MultiNamespacedCacheBuilder(namespaces), - MapperProvider: restmapper.NewDynamicRESTMapper, - MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort), -}) -``` - -By default the main program will set the manager's namespace using the value of `WATCH_NAMESPACE` env defined in `deploy/operator.yaml`. - -## Add a new Custom Resource Definition - -Add a new Custom Resource Definition(CRD) API called Memcached, with APIVersion `cache.example.com/v1alpha1` and Kind `Memcached`. - -```sh -$ operator-sdk add api --api-version=cache.example.com/v1alpha1 --kind=Memcached -``` - -This will scaffold the Memcached resource API under `pkg/apis/cache/v1alpha1/...`. 
- -### Define the spec and status - -Modify the spec and status of the `Memcached` Custom Resource(CR) at `pkg/apis/cache/v1alpha1/memcached_types.go`: - -```Go -type MemcachedSpec struct { - // Size is the size of the memcached deployment - Size int32 `json:"size"` -} -type MemcachedStatus struct { - // Nodes are the names of the memcached pods - Nodes []string `json:"nodes"` -} -``` - -After modifying the `*_types.go` file always run the following command to update the generated code for that resource type: - -```sh -$ operator-sdk generate k8s -``` - -### Updating CRD manifests - -Now that `MemcachedSpec` and `MemcachedStatus` have fields and possibly annotations, the CRD corresponding to the API's group and kind must be updated. To do so, run the following command: - -```console -$ operator-sdk generate crds -``` - -**Notes:** -- Your CRD *must* specify exactly one [storage version][crd-storage-version]. Use the `+kubebuilder:storageversion` [marker][crd-markers] to indicate the GVK that should be used to store data by the API server. This marker should be in a comment above your `Memcached` type. - -[crd-storage-version]:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#writing-reading-and-updating-versioned-customresourcedefinition-objects -[crd-markers]:https://book.kubebuilder.io/reference/markers/crd.html -[api-rules]: https://github.com/kubernetes/kubernetes/tree/36981002246682ed7dc4de54ccc2a96c1a0cbbdb/api/api-rules - -#### OpenAPI validation - -OpenAPIv3 schemas are added to CRD manifests in the `spec.validation` block when the manifests are generated. This validation block allows Kubernetes to validate the properties in a Memcached Custom Resource when it is created or updated. - -Markers (annotations) are available to configure validations for your API. These markers will always have a `+kubebuilder:validation` prefix. 
For example, adding an enum type specification can be done by adding the following marker: - -```go -// +kubebuilder:validation:Enum=Lion;Wolf;Dragon -type Alias string -``` - -Usage of markers in API code is discussed in the kubebuilder [CRD generation][generating-crd] and [marker][markers] documentation. A full list of OpenAPIv3 validation markers can be found [here][crd-markers]. - -To update the CRD `deploy/crds/cache.example.com_memcacheds_crd.yaml`, run the following command: - -```console -$ operator-sdk generate crds -``` - -An example of the generated YAML is as follows: - -```YAML -spec: - validation: - openAPIV3Schema: - properties: - spec: - properties: - size: - format: int32 - type: integer -``` - -To learn more about OpenAPI v3.0 validation schemas in Custom Resource Definitions, refer to the [Kubernetes Documentation][doc-validation-schema]. - -[doc-validation-schema]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema -[generating-crd]: https://book.kubebuilder.io/reference/generating-crd.html -[markers]: https://book.kubebuilder.io/reference/markers.html -[crd-markers]: https://book.kubebuilder.io/reference/markers/crd-validation.html - -## Add a new Controller - -Add a new [Controller][controller-go-doc] to the project that will watch and reconcile the Memcached resource: - -```sh -$ operator-sdk add controller --api-version=cache.example.com/v1alpha1 --kind=Memcached -``` - -This will scaffold a new Controller implementation under `pkg/controller/memcached/...`. - -For this example replace the generated Controller file `pkg/controller/memcached/memcached_controller.go` with the example [`memcached_controller.go`][memcached_controller] implementation. 
- -The example Controller executes the following reconciliation logic for each `Memcached` CR: -- Create a memcached Deployment if it doesn't exist -- Ensure that the Deployment size is the same as specified by the `Memcached` CR spec -- Update the `Memcached` CR status using the status writer with the names of the memcached pods - -The next two subsections explain how the Controller watches resources and how the reconcile loop is triggered. Skip to the [Build](#build-and-run-the-operator) section to see how to build and run the operator. - -### Resources watched by the Controller - -Inspect the Controller implementation at `pkg/controller/memcached/memcached_controller.go` to see how the Controller watches resources. - -The first watch is for the Memcached type as the primary resource. For each Add/Update/Delete event the reconcile loop will be sent a reconcile `Request` (a namespace/name key) for that Memcached object: - -```Go -err := c.Watch( -  &source.Kind{Type: &cachev1alpha1.Memcached{}}, &handler.EnqueueRequestForObject{}) -``` - -The next watch is for Deployments but the event handler will map each event to a reconcile `Request` for the owner of the Deployment, which in this case is the Memcached object for which the Deployment was created. This allows the controller to watch Deployments as a secondary resource. - -```Go -err := c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForOwner{ -  IsController: true, -  OwnerType: &cachev1alpha1.Memcached{}, - }) -``` - -#### Controller configurations - -There are a number of useful configurations that can be made when initializing a controller and declaring the watch parameters. For more details on these configurations consult the upstream [controller godocs][controller_godocs]. - -- Set the max number of concurrent Reconciles for the controller via the [`MaxConcurrentReconciles`][controller_options] option. Defaults to 1. 
- ```Go - _, err := controller.New("memcached-controller", mgr, controller.Options{ - MaxConcurrentReconciles: 2, - ... - }) - ``` -- Filter watch events using [predicates][event_filtering] -- Choose the type of [EventHandler][event_handler_godocs] to change how a watch event will translate to reconcile requests for the reconcile loop. For operator relationships that are more complex than primary and secondary resources, the [`EnqueueRequestsFromMapFunc`][enqueue_requests_from_map_func] handler can be used to transform a watch event into an arbitrary set of reconcile requests. - - -### Reconcile loop - -Every Controller has a Reconciler object with a `Reconcile()` method that implements the reconcile loop. The reconcile loop is passed the [`Request`][request-go-doc] argument which is a Namespace/Name key used to lookup the primary resource object, Memcached, from the cache: - -```Go -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // Lookup the Memcached instance for this reconcile request - memcached := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO(), request.NamespacedName, memcached) - ... -} -``` - -Based on the return values, [`Result`][result_go_doc] and error, the `Request` may be requeued and the reconcile loop may be triggered again: - -```Go -// Reconcile successful - don't requeue -return reconcile.Result{}, nil -// Reconcile failed due to error - requeue -return reconcile.Result{}, err -// Requeue for any reason other than error -return reconcile.Result{Requeue: true}, nil -``` - -You can set the `Result.RequeueAfter` to requeue the `Request` after a grace period as well: -```Go -import "time" - -// Reconcile for any reason than error after 5 seconds -return reconcile.Result{RequeueAfter: time.Second*5}, nil -``` - -**Note:** Returning `Result` with `RequeueAfter` set is how you can periodically reconcile a CR. 
- -#### Reconcile Result Use Cases -**The following are possible reconcile loop return options.** - -#### 1. With the error: - -If an error is encountered during processing the appropriate return option is to return an error. -This results in the reconcile loop being re-triggered to run again. - -**Usage** -```Go -return reconcile.Result{}, err -``` - -**Example:** - -In the example below a `reconcile.Result{}, err` is used when there is an error reading the object. -As a result the request is requeued for another try. -```Go -// Fetch the Memcached instance -memcached := &cachev1alpha1.Memcached{} -err := r.client.Get(context.TODO(), request.NamespacedName, memcached) -if err != nil { - if errors.IsNotFound(err) { - ... - } - // Error reading the object - requeue the request. - reqLogger.Error(err, "Failed to get Memcached") - return reconcile.Result{}, err -} -``` - -#### 2. Without an error: - -There are several situations where although no error occurred, the reconcile loop should signify -during its return that it needs to run again. - -**Usage** -```Go -return reconcile.Result{Requeue: true}, nil -``` - -**Example:** - -In the example below a `reconcile.Result{Requeue: true}, nil` is used because a new resource is being created and as such there is the potential that further processing is required. Thus, the reconcile loop needs to trigger a requeue but there is no error associated with this requeue. -As a result the request is requeued for another try. -```Go -// Define a new deployment -dep := r.deploymentForMemcached(memcached) -... - -// Deployment created successfully - return and requeue -return reconcile.Result{Requeue: true}, nil -``` - -#### 3. 
Without an error and no need to requeue the request: - -In some situations, such as when the primary resource has been deleted, there is no need to -requeue the request for another attempt - -**Usage** -```Go -return reconcile.Result{}, nil -``` - -**Example:** - -In the example below a `reconcile.Result{}, nil` is used because the Memcached resource was not found, and no further processing is required. -```Go -// Fetch the Memcached instance -memcached := &cachev1alpha1.Memcached{} -err := r.client.Get(context.TODO(), request.NamespacedName, memcached) -if err != nil { - if errors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - reqLogger.Info("Memcached resource not found. Ignoring since object must be deleted") - return reconcile.Result{}, nil - } -} -... -``` - -For a guide on Reconcilers, Clients, and interacting with resource Events, see the [Client API doc][doc_client_api] and the [controller-runtime documentation over reconcile][controller-runtime-reconcile-godoc]. - -## Build and run the operator - -Before running the operator, the CRD must be registered with the Kubernetes apiserver: - -```sh -$ kubectl create -f deploy/crds/cache.example.com_memcacheds_crd.yaml -``` - -Once this is done, there are two ways to run the operator: - -- As a Deployment inside a Kubernetes cluster -- As Go program outside a cluster - -### 1. Run as a Deployment inside the cluster - -**Note**: `operator-sdk build` invokes `docker build` by default, and optionally `buildah bud`. If using `buildah`, skip to the `operator-sdk build` invocation instructions below. If using `docker`, make sure your docker daemon is running and that you can run the docker client without sudo. You can check if this is the case by running `docker version`, which should complete without errors. 
Follow instructions for your OS/distribution on how to start the docker daemon and configure your access permissions, if needed. - -**Note**: If a `vendor/` directory is present, run - -```sh -$ go mod vendor -``` - -before building the memcached-operator image. - -Build the memcached-operator image and push it to a registry. Make sure to modify -`quay.io/example/` in the example below to reference a container repository that -you have access to. You can obtain an account for storing containers at -repository sites such quay.io or hub.docker.com: -```sh -$ operator-sdk build quay.io/example/memcached-operator:v0.0.1 -$ sed -i 's|REPLACE_IMAGE|quay.io/example/memcached-operator:v0.0.1|g' deploy/operator.yaml -$ docker push quay.io/example/memcached-operator:v0.0.1 -``` - -**Note** -If you are performing these steps on OSX, use the following `sed` command instead: -```sh -$ sed -i "" 's|REPLACE_IMAGE|quay.io/example/memcached-operator:v0.0.1|g' deploy/operator.yaml -``` - -The Deployment manifest is generated at `deploy/operator.yaml`. Be sure to update the deployment image as shown above since the default is just a placeholder. - -Setup RBAC and deploy the memcached-operator: - -```sh -$ kubectl create -f deploy/service_account.yaml -$ kubectl create -f deploy/role.yaml -$ kubectl create -f deploy/role_binding.yaml -$ kubectl create -f deploy/operator.yaml -``` - -Verify that the memcached-operator is up and running: - -```sh -$ kubectl get deployment -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -memcached-operator 1 1 1 1 1m -``` - -### 2. Run locally outside the cluster - -This method is preferred during development cycle to deploy and test faster. - -Set the name of the operator in an environment variable: - -```sh -export OPERATOR_NAME=memcached-operator -``` - -Run the operator locally with the default Kubernetes config file present at `$HOME/.kube/config`. 
And watch the namespace `default`: - -```sh -$ operator-sdk run local --watch-namespace=default -2018/09/30 23:10:11 Go Version: go1.10.2 -2018/09/30 23:10:11 Go OS/Arch: darwin/amd64 -2018/09/30 23:10:11 operator-sdk Version: 0.0.6+git -2018/09/30 23:10:12 Registering Components. -2018/09/30 23:10:12 Starting the Cmd. -``` - -You can use a specific kubeconfig via the flag `--kubeconfig=`. - -### 3. Deploy your Operator with the Operator Lifecycle Manager (OLM) - -OLM will manage creation of most if not all resources required to run your operator, -using a bit of setup from other `operator-sdk` commands. Check out the OLM integration -[user guide][quickstart-bundle] for more information. - -## Create a Memcached CR - -Create the example `Memcached` CR that was generated at `deploy/crds/cache.example.com_v1alpha1_memcached_cr.yaml`: - -```sh -$ cat deploy/crds/cache.example.com_v1alpha1_memcached_cr.yaml -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" -spec: - size: 3 - -$ kubectl apply -f deploy/crds/cache.example.com_v1alpha1_memcached_cr.yaml -``` - -Ensure that the memcached-operator creates the deployment for the CR: - -```sh -$ kubectl get deployment -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -memcached-operator 1 1 1 1 2m -example-memcached 3 3 3 3 1m -``` - -Check the pods and CR status to confirm the status is updated with the memcached pod names: - -```sh -$ kubectl get pods -NAME READY STATUS RESTARTS AGE -example-memcached-6fd7c98d8-7dqdr 1/1 Running 0 1m -example-memcached-6fd7c98d8-g5k7v 1/1 Running 0 1m -example-memcached-6fd7c98d8-m7vn7 1/1 Running 0 1m -memcached-operator-7cc7cfdf86-vvjqk 1/1 Running 0 2m -``` - -```sh -$ kubectl get memcached/example-memcached -o yaml -apiVersion: cache.example.com/v1alpha1 -kind: Memcached -metadata: - clusterName: "" - creationTimestamp: 2018-03-31T22:51:08Z - generation: 0 - name: example-memcached - namespace: default - resourceVersion: "245453" - 
selfLink: /apis/cache.example.com/v1alpha1/namespaces/default/memcacheds/example-memcached - uid: 0026cc97-3536-11e8-bd83-0800274106a1 -spec: - size: 3 -status: - nodes: - - example-memcached-6fd7c98d8-7dqdr - - example-memcached-6fd7c98d8-g5k7v - - example-memcached-6fd7c98d8-m7vn7 -``` - -### Update the size - -Change the `spec.size` field in the memcached CR from 3 to 4 and apply the change: - -```sh -$ cat deploy/crds/cache.example.com_v1alpha1_memcached_cr.yaml -apiVersion: "cache.example.com/v1alpha1" -kind: "Memcached" -metadata: - name: "example-memcached" -spec: - size: 4 - -$ kubectl apply -f deploy/crds/cache.example.com_v1alpha1_memcached_cr.yaml -``` - -Confirm that the operator changes the deployment size: - -```sh -$ kubectl get deployment -NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE -example-memcached 4 4 4 4 5m -``` - -### Cleanup - -Clean up the resources: - -```sh -$ kubectl delete -f deploy/crds/cache.example.com_v1alpha1_memcached_cr.yaml -$ kubectl delete -f deploy/operator.yaml -$ kubectl delete -f deploy/role_binding.yaml -$ kubectl delete -f deploy/role.yaml -$ kubectl delete -f deploy/service_account.yaml -``` - -## Advanced Topics - -### Manage CR status conditions - -An often-used pattern is to include `Conditions` in the status of custom resources. Conditions represent the latest available observations of an object's state (see the [Kubernetes API conventionsdocumentation][typical-status-properties] for more information). - -The `Conditions` field added to the `MemcachedStatus` struct simplifies the management of your CR's conditions. It: -- Enables callers to add and remove conditions. -- Ensures that there are no duplicates. -- Sorts the conditions deterministically to avoid unnecessary repeated reconciliations. -- Automatically handles the each condition's `LastTransitionTime`. -- Provides helper methods to make it easy to determine the state of a condition. 
- -To use conditions in your custom resource, add a Conditions field to the Status struct in `_types.go`: - -```Go -import ( - "github.com/operator-framework/operator-sdk/pkg/status" -) - -type MyAppStatus struct { - // Conditions represent the latest available observations of an object's state - Conditions status.Conditions `json:"conditions"` -} -``` - -Then, in your controller, you can use [`Conditions`][godoc-conditions] methods to make it easier to set and remove conditions or check their current values. - -### Adding 3rd Party Resources To Your Operator - -The operator's Manager supports the Core Kubernetes resource types as found in the client-go [scheme][scheme_package] package and will also register the schemes of all custom resource types defined in your project under `pkg/apis`. - -```Go -import ( - "github.com/example-inc/memcached-operator/pkg/apis" - ... -) - -// Setup Scheme for all resources -if err := apis.AddToScheme(mgr.GetScheme()); err != nil { - log.Error(err, "") - os.Exit(1) -} -``` - -To add a 3rd party resource to an operator, you must add it to the Manager's scheme. By creating an `AddToScheme()` method or reusing one you can easily add a resource to your scheme. An [example][deployments_register] shows that you define a function and then use the [runtime][runtime_package] package to create a `SchemeBuilder`. - -#### Register with the Manager's scheme - -Call the `AddToScheme()` function for your 3rd party resource and pass it the Manager's scheme via `mgr.GetScheme()` -in `cmd/manager/main.go`. - -Example: -```go -import ( - .... - - routev1 "github.com/openshift/api/route/v1" -) - -func main() { - .... - - // Adding the routev1 - if err := routev1.AddToScheme(mgr.GetScheme()); err != nil { - log.Error(err, "") - os.Exit(1) - } - - .... 
- - // Setup all Controllers - if err := controller.AddToManager(mgr); err != nil { - log.Error(err, "") - os.Exit(1) - } -} -``` - -##### If 3rd party resource does not have `AddToScheme()` function - -Use the [SchemeBuilder][scheme_builder] package from controller-runtime to initialize a new scheme builder that can be used to register the 3rd party resource with the manager's scheme. - -Example of registering `DNSEndpoints` 3rd party resource from `external-dns`: - -```go -import ( - ... - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" - ... - // DNSEndoints - externaldns "github.com/kubernetes-incubator/external-dns/endpoint" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ) - -func main() { - .... - - log.Info("Registering Components.") - - schemeBuilder := &scheme.Builder{GroupVersion: schema.GroupVersion{Group: "externaldns.k8s.io", Version: "v1alpha1"}} - schemeBuilder.Register(&externaldns.DNSEndpoint{}, &externaldns.DNSEndpointList{}) - if err := schemeBuilder.AddToScheme(mgr.GetScheme()); err != nil { - log.Error(err, "") - os.Exit(1) - } - - .... - - // Setup all Controllers - if err := controller.AddToManager(mgr); err != nil { - log.Error(err, "") - os.Exit(1) - } -} -``` - - - -**NOTES:** - -* After adding new import paths to your operator project, run `go mod vendor` if a `vendor/` directory is present in the root of your project directory to fulfill these dependencies. -* Your 3rd party resource needs to be added before add the controller in `"Setup all Controllers"`. - -#### Default Metrics exported with 3rd party resource - -By default, SDK operator projects are set up to [export metrics][metrics_doc] through `addMetrics` in `cmd/manager/main.go`. See that it will call the `serveCRMetrics`: - - -```go -func serveCRMetrics(cfg *rest.Config) error { - ... - - filteredGVK, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme) - if err != nil { - return err - } - - ... 
- - // Generate and serve custom resource specific metrics. - err = kubemetrics.GenerateAndServeCRMetrics(cfg, ns, filteredGVK, metricsHost, operatorMetricsPort) - if err != nil { - return err - } - - ... -} -``` - -The `kubemetrics.GenerateAndServeCRMetrics` function requires an RBAC rule to list all GroupVersionKinds in the list of watched namespaces, so you might need to [filter](https://github.com/operator-framework/operator-sdk/blob/v0.15.2/pkg/k8sutil/k8sutil.go#L161) the kinds returned by [`k8sutil.GetGVKsFromAddToScheme`](https://godoc.org/github.com/operator-framework/operator-sdk/pkg/k8sutil#GetGVKsFromAddToScheme) more stringently to avoid authorization errors such as `Failed to list *unstructured.Unstructured`. - -In this scenario, this error may occur because your Operator RBAC roles do not include permissions to LIST the third party API schemas or the schemas which are required to them and will be added with. See that the default SDK implementation will just add the Kubernetes schemas and they will be ignored in the metrics It means that you might need to do an similar implementation to filter the third party API schemas and their dependencies added in order to provide a filtered a List of GVK(GroupVersionKind) to the `GenerateAndServeCRMetrics` method. - -### Handle Cleanup on Deletion - -To implement complex deletion logic, you can add a finalizer to your Custom Resource. This will prevent your Custom Resource from being -deleted until you remove the finalizer (ie, after your cleanup logic has successfully run). For more information, see the -[official Kubernetes documentation on finalizers](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#finalizers). - -**Example:** - -The following is a snippet from the controller file under `pkg/controller/memcached/memcached_controller.go` - -```Go -import ( - ... 
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" -) - -const memcachedFinalizer = "finalizer.cache.example.com" - -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) - reqLogger.Info("Reconciling Memcached") - - // Fetch the Memcached instance - memcached := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO(), request.NamespacedName, memcached) - if err != nil { - if errors.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - reqLogger.Info("Memcached resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. - reqLogger.Error(err, "Failed to get Memcached.") - return reconcile.Result{}, err - } - - ... - - // Check if the Memcached instance is marked to be deleted, which is - // indicated by the deletion timestamp being set. - isMemcachedMarkedToBeDeleted := memcached.GetDeletionTimestamp() != nil - if isMemcachedMarkedToBeDeleted { - if contains(memcached.GetFinalizers(), memcachedFinalizer) { - // Run finalization logic for memcachedFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - if err := r.finalizeMemcached(reqLogger, memcached); err != nil { - return reconcile.Result{}, err - } - - // Remove memcachedFinalizer. Once all finalizers have been - // removed, the object will be deleted. 
- controllerutil.RemoveFinalizer(memcached, memcachedFinalizer) - err := r.client.Update(context.TODO(), memcached) - if err != nil { - return reconcile.Result{}, err - } - } - return reconcile.Result{}, nil - } - - // Add finalizer for this CR - if !contains(memcached.GetFinalizers(), memcachedFinalizer) { - if err := r.addFinalizer(reqLogger, memcached); err != nil { - return reconcile.Result{}, err - } - } - - ... - - return reconcile.Result{}, nil -} - -func (r *ReconcileMemcached) finalizeMemcached(reqLogger logr.Logger, m *cachev1alpha1.Memcached) error { - // TODO(user): Add the cleanup steps that the operator - // needs to do before the CR can be deleted. Examples - // of finalizers include performing backups and deleting - // resources that are not owned by this CR, like a PVC. - reqLogger.Info("Successfully finalized memcached") - return nil -} - -func (r *ReconcileMemcached) addFinalizer(reqLogger logr.Logger, m *cachev1alpha1.Memcached) error { - reqLogger.Info("Adding Finalizer for the Memcached") - controllerutil.AddFinalizer(m, memcachedFinalizer) - - // Update CR - err := r.client.Update(context.TODO(), m) - if err != nil { - reqLogger.Error(err, "Failed to update Memcached with finalizer") - return err - } - return nil -} - -func contains(list []string, s string) bool { - for _, v := range list { - if v == s { - return true - } - } - return false -} -``` - -### Metrics - -To learn about how metrics work in the Operator SDK read the [metrics section][metrics_doc] of the user documentation. - -### Leader election - -During the lifecycle of an operator it's possible that there may be more than 1 instance running at any given time e.g when rolling out an upgrade for the operator. -In such a scenario it is necessary to avoid contention between multiple operator instances via leader election so that only one leader instance handles the reconciliation while the other instances are inactive but ready to take over when the leader steps down. 
- -There are two different leader election implementations to choose from, each with its own tradeoff. - -- [Leader-for-life][leader_for_life]: The leader pod only gives up leadership (via garbage collection) when it is deleted. This implementation precludes the possibility of 2 instances mistakenly running as leaders (split brain). However, this method can be subject to a delay in electing a new leader. For instance when the leader pod is on an unresponsive or partitioned node, the [`pod-eviction-timeout`][pod_eviction_timeout] dictates how long it takes for the leader pod to be deleted from the node and step down (default 5m). -- [Leader-with-lease][leader_with_lease]: The leader pod periodically renews the leader lease and gives up leadership when it can't renew the lease. This implementation allows for a faster transition to a new leader when the existing leader is isolated, but there is a possibility of split brain in [certain situations][lease_split_brain]. - -By default the SDK enables the leader-for-life implementation. However you should consult the docs above for both approaches to consider the tradeoffs that make sense for your use case. - -The following examples illustrate how to use the two options: - -#### Leader for life - -A call to `leader.Become()` will block the operator as it retries until it can become the leader by creating the configmap named `memcached-operator-lock`. - -```Go -import ( - ... - "github.com/operator-framework/operator-sdk/pkg/leader" -) - -func main() { - ... - err = leader.Become(context.TODO(), "memcached-operator-lock") - if err != nil { - log.Error(err, "Failed to retry for leader lock") - os.Exit(1) - } - ... -} -``` -If the operator is not running inside a cluster `leader.Become()` will simply return without error to skip the leader election since it can't detect the operator's namespace. 
- -#### Leader with lease - -The leader-with-lease approach can be enabled via the [Manager Options][manager_options] for leader election. - -```Go -import ( - ... - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -func main() { - ... - opts := manager.Options{ - ... - LeaderElection: true, - LeaderElectionID: "memcached-operator-lock" - } - mgr, err := manager.New(cfg, opts) - ... -} -``` - -When the operator is not running in a cluster, the Manager will return an error on starting since it can't detect the operator's namespace in order to create the configmap for leader election. You can override this namespace by setting the Manager's `LeaderElectionNamespace` option. - -[enqueue_requests_from_map_func]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/handler#EnqueueRequestsFromMapFunc -[event_handler_godocs]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/handler#hdr-EventHandlers -[event_filtering]:/docs/golang/legacy/references/event-filtering/ -[controller_options]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/controller#Options -[controller_godocs]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/controller -[controller-runtime-reconcile-godoc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Reconciler -[operator_scope]:/docs/legacy-common/operator-scope/ -[pod_eviction_timeout]: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/#options -[manager_options]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#Options -[lease_split_brain]: https://github.com/kubernetes/client-go/blob/30b06a83d67458700a5378239df6b96948cb9160/tools/leaderelection/leaderelection.go#L21-L24 -[leader_for_life]: https://godoc.org/github.com/operator-framework/operator-sdk/pkg/leader -[leader_with_lease]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/leaderelection -[memcached_handler]: 
../example/memcached-operator/handler.go.tmpl -[memcached_controller]: https://github.com/operator-framework/operator-sdk/blob/master/example/memcached-operator/memcached_controller.go.tmpl -[layout_doc]:/docs/golang/legacy/references/project-layout/ -[homebrew_tool]:https://brew.sh/ -[go_mod_wiki]: https://github.com/golang/go/wiki/Modules -[go_vendoring]: https://blog.gopheracademy.com/advent-2015/vendor-folder/ -[scheme_package]:https://github.com/kubernetes/client-go/blob/master/kubernetes/scheme/register.go -[deployments_register]: https://github.com/kubernetes/api/blob/master/apps/v1/register.go#L41 -[doc_client_api]:/docs/golang/legacy/references/client/ -[runtime_package]: https://godoc.org/k8s.io/apimachinery/pkg/runtime -[manager_go_doc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#Manager -[controller-go-doc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg#hdr-Controller -[request-go-doc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Request -[result_go_doc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Result -[metrics_doc]:/docs/golang/legacy/monitoring/ -[multi-namespaced-cache-builder]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/cache#MultiNamespacedCacheBuilder -[scheme_builder]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/scheme#Builder -[typical-status-properties]: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties -[godoc-conditions]: https://godoc.org/github.com/operator-framework/operator-sdk/pkg/status#Conditions -[quickstart-bundle]: /docs/olm-integration/legacy/quickstart-bundle -[new_docs]:/docs/golang/quickstart -[new_CLI]:/docs/new-cli diff --git a/website/content/en/docs/golang/legacy/references/_index.md b/website/content/en/docs/golang/legacy/references/_index.md deleted file mode 100644 index 
55fa04dee3..0000000000 --- a/website/content/en/docs/golang/legacy/references/_index.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Golang Based Operator Reference -linkTitle: Reference -weight: 300 ---- \ No newline at end of file diff --git a/website/content/en/docs/golang/legacy/references/client.md b/website/content/en/docs/golang/legacy/references/client.md deleted file mode 100644 index 754af36ab1..0000000000 --- a/website/content/en/docs/golang/legacy/references/client.md +++ /dev/null @@ -1,570 +0,0 @@ ---- -title: Using the Controller Runtime Client API with Operator SDK -linkTitle: Controller Runtime Client API -weight: 1 ---- - -## Overview - -The [`controller-runtime`][repo-controller-runtime] library provides various abstractions to watch and reconcile resources in a Kubernetes cluster via CRUD (Create, Update, Delete, as well as Get and List in this case) operations. Operators use at least one controller to perform a coherent set of tasks within a cluster, usually through a combination of CRUD operations. The Operator SDK uses controller-runtime's [Client][doc-client-client] interface, which provides the interface for these operations. - -controller-runtime defines several interfaces used for cluster interaction: -- `client.Client`: implementers perform CRUD operations on a Kubernetes cluster. -- `manager.Manager`: manages shared dependencies, such as Caches and Clients. -- `reconcile.Reconciler`: compares provided state with actual cluster state and updates the cluster on finding state differences using a Client. - -Clients are the focus of this document. A separate document will discuss Managers. - -## Client Usage - -### Default Client - -The SDK relies on a `manager.Manager` to create a `client.Client` interface that performs Create, Update, Delete, Get, and List operations within a `reconcile.Reconciler`'s Reconcile function. 
The SDK will generate code to create a Manager, which holds a Cache and a Client to be used in CRUD operations and communicate with the API server. By default a Controller's Reconciler will be populated with the Manager's Client which is a [split-client][doc-split-client]. - -`pkg/controller//_controller.go`: -```Go -func newReconciler(mgr manager.Manager) reconcile.Reconciler { - return &ReconcileKind{client: mgr.GetClient(), scheme: mgr.GetScheme()} -} - -type ReconcileKind struct { - // Populated above from a manager.Manager. - client client.Client - scheme *runtime.Scheme -} -``` - -A split client reads (Get and List) from the Cache and writes (Create, Update, Delete) to the API server. Reading from the Cache significantly reduces request load on the API server; as long as the Cache is updated by the API server, read operations are eventually consistent. - -### Non-default Client - -An operator developer may wish to create their own Client that serves read requests(Get List) from the API server instead of the cache, for example. controller-runtime provides a [constructor][doc-client-constr] for Clients: - -```Go -// New returns a new Client using the provided config and Options. -func New(config *rest.Config, options client.Options) (client.Client, error) -``` - -`client.Options` allow the caller to specify how the new Client should communicate with the API server. - -```Go -// Options are creation options for a Client -type Options struct { - // Scheme, if provided, will be used to map go structs to GroupVersionKinds - Scheme *runtime.Scheme - - // Mapper, if provided, will be used to map GroupVersionKinds to Resources - Mapper meta.RESTMapper -} -``` -Example: -```Go -import ( - "sigs.k8s.io/controller-runtime/pkg/client/config" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -cfg, err := config.GetConfig() -... -c, err := client.New(cfg, client.Options{}) -... -``` - -**Note**: defaults are set by `client.New` when Options are empty. 
The default [scheme][code-scheme-default] will have the [core][doc-k8s-core] Kubernetes resource types registered. The caller *must* set a scheme that has custom operator types registered for the new Client to recognize these types. - -Creating a new Client is not usually necessary nor advised, as the default Client is sufficient for most use cases. - -### Reconcile and the Client API - -A Reconciler implements the [`reconcile.Reconciler`][doc-reconcile-reconciler] interface, which exposes the Reconcile method. Reconcilers are added to a corresponding Controller for a Kind; Reconcile is called in response to cluster or external Events, with a `reconcile.Request` object argument, to read and write cluster state by the Controller, and returns a `reconcile.Result`. SDK Reconcilers have access to a Client in order to make Kubernetes API calls. - -**Note**: For those familiar with the SDK's old project semantics, [Handle][doc-osdk-handle] received resource events and reconciled state for multiple resource types, whereas Reconcile receives resource events and reconciles state for a single resource type. - -```Go -// ReconcileKind reconciles a Kind object -type ReconcileKind struct { - // client, initialized using mgr.Client() above, is a split client - // that reads objects from the cache and writes to the apiserver - client client.Client - - // scheme defines methods for serializing and deserializing API objects, - // a type registry for converting group, version, and kind information - // to and from Go schemas, and mappings between Go schemas of different - // versions. A scheme is the foundation for a versioned API and versioned - // configuration over time. - scheme *runtime.Scheme -} - -// Reconcile watches for Events and reconciles cluster state with desired -// state defined in the method body. 
-// The Controller will requeue the Request to be processed again if an error -// is non-nil or Result.Requeue is true, otherwise upon completion it will -// remove the work from the queue. -func (r *ReconcileKind) Reconcile(request reconcile.Request) (reconcile.Result, error) -``` - -Reconcile is where Controller business logic lives, i.e. where Client API calls are made via `ReconcileKind.client`. A `client.Client` implementer performs the following operations: - -#### Get - -```Go -// Get retrieves an API object for a given object key from the Kubernetes cluster -// and stores it in obj. -func (c Client) Get(ctx context.Context, key ObjectKey, obj runtime.Object) error -``` -**Note**: An `ObjectKey` is simply a `client` package alias for [`types.NamespacedName`][doc-types-nsname]. - -Example: -```Go -import ( - "context" - "github.com/example-org/app-operator/pkg/apis/cache/v1alpha1" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) { - ... - - app := &v1alpha1.App{} - ctx := context.TODO() - err := r.client.Get(ctx, request.NamespacedName, app) - - ... -} -``` - -#### List - -```Go -// List retrieves a list of objects for a given namespace and list options -// and stores the list in obj. -func (c Client) List(ctx context.Context, list runtime.Object, opts ...client.ListOption) error -``` - -A `client.ListOption` is an interface that sets [`client.ListOptions`][list-options] fields. A `client.ListOption` is created by using one of the provided implementations: [`MatchingLabels`][matching-labels], [`MatchingFields`][matching-fields], [`InNamespace`][in-namespace]. - -Example: - -```Go -import ( - "context" - "fmt" - "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) { - ... 
- - // Return all pods in the request namespace with a label of `app=` - // and phase `Running`. - podList := &v1.PodList{} - opts := []client.ListOption{ - client.InNamespace(request.NamespacedName.Namespace), - client.MatchingLabels{"app": request.NamespacedName.Name}, - client.MatchingFields{"status.phase": "Running"}, - } - ctx := context.TODO() - err := r.client.List(ctx, podList, opts...) - - ... -} -``` - -[list-options]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#ListOptions -[matching-labels]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#MatchingLabels -[matching-fields]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#MatchingFields -[in-namespace]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#InNamespace - -#### Create - -```Go -// Create saves the object obj in the Kubernetes cluster. -// Returns an error -func (c Client) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error -``` - -A `client.CreateOption` is an interface that sets [`client.CreateOptions`][create-options] fields. A `client.CreateOption` is created by using one of the provided implementations: [`DryRunAll`][dry-run-all], [`ForceOwnership`][force-ownership]. Generally these options are not needed. - -Example: - -```Go -import ( - "context" - "k8s.io/api/apps/v1" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) { - ... - - app := &v1.Deployment{ // Any cluster object you want to create. - ... - } - ctx := context.TODO() - err := r.client.Create(ctx, app) - - ... -} -``` - -[create-options]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#CreateOptions - -#### Update - -```Go -// Update updates the given obj in the Kubernetes cluster. obj must be a -// struct pointer so that obj can be updated with the content returned -// by the API server. 
Update does *not* update the resource's status
-// subresource
-func (c Client) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error
-```
-
-A `client.UpdateOption` is an interface that sets [`client.UpdateOptions`][update-options] fields. A `client.UpdateOption` is created by using one of the provided implementations: [`DryRunAll`][dry-run-all], [`ForceOwnership`][force-ownership]. Generally these options are not needed.
-
-Example:
-
-```Go
-import (
- "context"
- "k8s.io/api/apps/v1"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- ...
-
- dep := &v1.Deployment{}
- err := r.client.Get(context.TODO(), request.NamespacedName, dep)
-
- ...
-
- ctx := context.TODO()
- dep.Spec.Selector.MatchLabels["is_running"] = "true"
- err := r.client.Update(ctx, dep)
-
- ...
-}
-```
-
-[update-options]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#UpdateOptions
-
-#### Patch
-
-```Go
-// Patch patches the given obj in the Kubernetes cluster. obj must be a
-// struct pointer so that obj can be updated with the content returned by the Server.
-func (c Client) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error
-```
-
-A `client.PatchOption` is an interface that sets [`client.PatchOptions`][patch-options] fields. A `client.PatchOption` is created by using one of the provided implementations: [`DryRunAll`][dry-run-all], [`ForceOwnership`][force-ownership]. Generally these options are not needed.
-
-Example:
-
-```Go
-import (
- "context"
- "k8s.io/api/apps/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- ...
-
- dep := &v1.Deployment{}
- err := r.client.Get(context.TODO(), request.NamespacedName, dep)
-
- ...
- - ctx := context.TODO() - // A merge patch will preserve other fields modified at runtime. - patch := client.MergeFrom(dep.DeepCopy()) - dep.Spec.Selector.MatchLabels["is_running"] = "true" - err := r.client.Patch(ctx, dep, patch) - - ... -} -``` - -[patch-options]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#PatchOption -[dry-run-all]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#DryRunAll -[force-ownership]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#ForceOwnership - -##### Updating Status Subresource - -When updating the [status subresource][cr-status-subresource] from the client, the [`StatusWriter`][status-writer] must be used. The status subresource is retrieved with `Status()` and updated with `Update()` or patched with `Patch()`. - -`Update()` takes variadic `client.UpdateOption`'s, and `Patch()` takes variadic `client.PatchOption`'s. See [`Client.Update()`](#update) and [`Client.Patch()`](#patch) for more details. Generally these options are not needed. - -##### Status - -```Go -// Status() returns a StatusWriter object that can be used to update the -// object's status subresource -func (c Client) Status() (client.StatusWriter, error) -``` - -Example: -```Go -import ( - "context" - cachev1alpha1 "github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) { - ... - - ctx := context.TODO() - mem := &cachev1alpha1.Memcached{} - err := r.client.Get(ctx, request.NamespacedName, mem) - - ... - - // Update - mem.Status.Nodes = []string{"pod1", "pod2"} - err := r.client.Status().Update(ctx, mem) - - ... - - // Patch - patch := client.MergeFrom(mem.DeepCopy()) - mem.Status.Nodes = []string{"pod1", "pod2", "pod3"} - err := r.client.Status().Patch(ctx, mem, patch) - - ... 
-} -``` - -[status-writer]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#StatusWriter - -#### Delete - -```Go -// Delete deletes the given obj from Kubernetes cluster. -func (c Client) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error -``` - -A `client.DeleteOption` is an interface that sets [`client.DeleteOptions`][delete-opts] fields. A `client.DeleteOption` is created by using one of the provided implementations: [`GracePeriodSeconds`][grace-period-seconds], [`Preconditions`][preconditions], [`PropagationPolicy`][propagation-policy]. - -Example: - -```Go -import ( - "context" - "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) { - ... - - pod := &v1.Pod{} - err := r.client.Get(context.TODO(), request.NamespacedName, pod) - - ... - - ctx := context.TODO() - if pod.Status.Phase == v1.PodUnknown { - // Delete the pod after 5 seconds. - err := r.client.Delete(ctx, pod, client.GracePeriodSeconds(5)) - ... - } - - ... -} -``` - -[delete-opts]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#DeleteOptions -[grace-period-seconds]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#GracePeriodSeconds -[preconditions]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#Preconditions -[propagation-policy]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#PropagationPolicy - -#### DeleteAllOf - -```Go -// DeleteAllOf deletes all objects of the given type matching the given options. -func (c Client) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...client.DeleteAllOfOption) error -``` - -A `client.DeleteAllOfOption` is an interface that sets [`client.DeleteAllOfOptions`][deleteallof-opts] fields. A `client.DeleteAllOfOption` wraps a [`client.ListOption`](#list) and [`client.DeleteOption`](#delete). 
-
-Example:
-
-```Go
-import (
- "context"
- "fmt"
- "k8s.io/api/core/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) {
- ...
-
- // Delete all pods in the request namespace with a label of `app=` 
- // and phase `Failed`.
- pod := &v1.Pod{}
- opts := []client.DeleteAllOfOption{
- client.InNamespace(request.NamespacedName.Namespace),
- client.MatchingLabels{"app": request.NamespacedName.Name},
- client.MatchingFields{"status.phase": "Failed"},
- client.GracePeriodSeconds(5),
- }
- ctx := context.TODO()
- err := r.client.DeleteAllOf(ctx, pod, opts...)
-
- ...
-}
-```
-
-[deleteallof-opts]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/client#DeleteAllOfOptions
-
-### Example usage
-
-```Go
-import (
- "context"
- "reflect"
-
- appv1alpha1 "github.com/example-org/app-operator/pkg/apis/app/v1alpha1"
-
- appsv1 "k8s.io/api/apps/v1"
- corev1 "k8s.io/api/core/v1"
- "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
- "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/types"
- "sigs.k8s.io/controller-runtime/pkg/client"
- "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
- "sigs.k8s.io/controller-runtime/pkg/reconcile"
-)
-
-type ReconcileApp struct {
- client client.Client
- scheme *runtime.Scheme
-}
-
-func (r *ReconcileApp) Reconcile(request reconcile.Request) (reconcile.Result, error) {
-
- // Fetch the App instance.
- app := &appv1alpha1.App{}
- err := r.client.Get(context.TODO(), request.NamespacedName, app)
- if err != nil {
- if errors.IsNotFound(err) {
- return reconcile.Result{}, nil
- }
- return reconcile.Result{}, err
- }
-
- // Check if the deployment already exists, if not create a new deployment. 
- found := &appsv1.Deployment{} - err = r.client.Get(context.TODO(), types.NamespacedName{Name: app.Name, Namespace: app.Namespace}, found) - if err != nil { - if errors.IsNotFound(err) { - // Define and create a new deployment. - dep := r.deploymentForApp(app) - if err = r.client.Create(context.TODO(), dep); err != nil { - return reconcile.Result{}, err - } - return reconcile.Result{Requeue: true}, nil - } else { - return reconcile.Result{}, err - } - } - - // Ensure the deployment size is the same as the spec. - size := app.Spec.Size - if *found.Spec.Replicas != size { - found.Spec.Replicas = &size - if err = r.client.Update(context.TODO(), found); err != nil { - return reconcile.Result{}, err - } - return reconcile.Result{Requeue: true}, nil - } - - // Update the App status with the pod names. - // List the pods for this app's deployment. - podList := &corev1.PodList{} - listOpts := []client.ListOption{ - client.InNamespace(app.Namespace), - client.MatchingLabels(labelsForApp(app.Name)), - } - if err = r.client.List(context.TODO(), podList, listOpts...); err != nil { - return reconcile.Result{}, err - } - - // Update status.Nodes if needed. - podNames := getPodNames(podList.Items) - if !reflect.DeepEqual(podNames, app.Status.Nodes) { - app.Status.Nodes = podNames - if err := r.client.Status().Update(context.TODO(), app); err != nil { - return reconcile.Result{}, err - } - } - - return reconcile.Result{}, nil -} - -// deploymentForApp returns a app Deployment object. 
-func (r *ReconcileKind) deploymentForApp(m *appv1alpha1.App) *appsv1.Deployment { - lbls := labelsForApp(m.Name) - replicas := m.Spec.Size - - dep := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: m.Name, - Namespace: m.Namespace, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &replicas, - Selector: &metav1.LabelSelector{ - MatchLabels: lbls, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: lbls, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{{ - Image: "app:alpine", - Name: "app", - Command: []string{"app", "-a=64", "-b"}, - Ports: []corev1.ContainerPort{{ - ContainerPort: 10000, - Name: "app", - }}, - }}, - }, - }, - }, - } - - // Set App instance as the owner and controller. - // NOTE: calling SetControllerReference, and setting owner references in - // general, is important as it allows deleted objects to be garbage collected. - controllerutil.SetControllerReference(m, dep, r.scheme) - return dep -} - -// labelsForApp creates a simple set of labels for App. 
-func labelsForApp(name string) map[string]string { - return map[string]string{"app_name": "app", "app_cr": name} -} -``` - -[repo-controller-runtime]:https://github.com/kubernetes-sigs/controller-runtime -[doc-client-client]:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/client#Client -[doc-split-client]:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/client#DelegatingClient -[doc-client-constr]:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/client#New -[code-scheme-default]:https://github.com/kubernetes-sigs/controller-runtime/blob/master/pkg/client/client.go#L51 -[doc-k8s-core]:https://godoc.org/k8s.io/api/core/v1 -[doc-reconcile-reconciler]:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Reconciler -[doc-osdk-handle]:https://github.com/operator-framework/operator-sdk/blob/master/design/milestone-0.0.2/action-api.md#handler -[doc-types-nsname]:https://godoc.org/k8s.io/apimachinery/pkg/types#NamespacedName -[cr-status-subresource]:https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource diff --git a/website/content/en/docs/golang/legacy/references/event-filtering.md b/website/content/en/docs/golang/legacy/references/event-filtering.md deleted file mode 100644 index 1ebc7cbdd4..0000000000 --- a/website/content/en/docs/golang/legacy/references/event-filtering.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: Using Predicates for Event Filtering with Operator SDK -linkTitle: Using Predicates for Event Filtering -weight: 2 ---- - -[Events][doc_event] are produced by [Sources][doc_source] assigned to resources a controller is watching. These events are transformed into Requests by [EventHandlers][doc_eventhandler] and passed to `Reconcile()`. [Predicates][doc_predicate] allow controllers to filter events before they are provided to EventHandlers. 
Filtering is useful because your controller may only want to handle specific types of events. Filtering also helps reduce chattiness with the API server, as `Reconcile()` is only called for events transformed by EventHandlers. - -## Predicate types - -A Predicate implements the following methods that take an event of a particular type and return true if the event should be processed by `Reconcile()`: - -```Go -// Predicate filters events before enqueuing the keys. -type Predicate interface { - Create(event.CreateEvent) bool - Delete(event.DeleteEvent) bool - Update(event.UpdateEvent) bool - Generic(event.GenericEvent) bool -} - -// Funcs implements Predicate. -type Funcs struct { - CreateFunc func(event.CreateEvent) bool - DeleteFunc func(event.DeleteEvent) bool - UpdateFunc func(event.UpdateEvent) bool - GenericFunc func(event.GenericEvent) bool -} -``` - -For example, all Create events for any watched resource will be passed to `Funcs.Create()` and filtered out if the method evaluates to `false`. If you do not register a Predicate method for a particular type, events of that type will not be filtered. - -All event types contain Kubernetes [metadata][doc_object_metadata] about the object that triggered the event, and the object itself. Predicate logic uses these data to make decisions about what should be filtered. Some event types include other fields pertaining to the semantics of that event. For example, `event.UpdateEvent` includes both old and new metadata and objects: - -```Go -type UpdateEvent struct { - // MetaOld is the ObjectMeta of the Kubernetes Type that was updated (before the update). - MetaOld v1.Object - - // ObjectOld is the object from the event. - ObjectOld runtime.Object - - // MetaNew is the ObjectMeta of the Kubernetes Type that was updated (after the update). - MetaNew v1.Object - - // ObjectNew is the object from the event. 
- ObjectNew runtime.Object -} -``` - -You can find all type definitions in the `event` package [documentation][doc_event]. - -## Using Predicates - -Any number of Predicates can be passed to `controller.Watch()`, which will filter an event if any of those Predicates evaluates to `false`. This first example is an implementation of a `memcached-operator` controller that simply filters Delete events on Pods that have been confirmed deleted; the controller receives all Delete events that occur, and we may only care about resources that have not been completely deleted: - -```Go -import ( - cachev1alpha1 "github.com/example-inc/app-operator/pkg/apis/cache/v1alpha1" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -// add adds a new Controller to mgr with r as the reconcile.Reconciler. -func add(mgr manager.Manager, r reconcile.Reconciler) error { - // Create a new controller. - c, err := controller.New("memcached-controller", mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - ... - - // Create a source for watching Pod events. - src := &source.Kind{Type: &corev1.Pod{}} - // Create a handler for handling events from Pods owned by the Memcached resource. - h := &handler.EnqueueRequestForOwner{ - IsController: true, - OwnerType: &cachev1alpha1.Memcached{}, - } - pred := predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - // Ignore updates to CR status in which case metadata.Generation does not change - return e.MetaOld.GetGeneration() != e.MetaNew.GetGeneration() - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. 
- return !e.DeleteStateUnknown - }, - } - // Watch for Pod events. - err = c.Watch(src, h, pred) - if err != nil { - return err - } - - ... -} -``` - -## Use cases - -Predicates are not necessary for many operators, although filtering reduces the amount of chatter to the API server from `Reconcile()`. They are particularly useful for controllers that watch resources cluster-wide, i.e. without a namespace. - -[doc_event]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/event -[doc_source]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/source#Source -[doc_eventhandler]:https://godoc.org/sigs.k8s.io/controller-runtime/pkg/handler#EventHandler -[doc_predicate]:https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/predicate -[doc_object_metadata]:https://godoc.org/k8s.io/apimachinery/pkg/apis/meta/v1#Object diff --git a/website/content/en/docs/golang/legacy/references/logging.md b/website/content/en/docs/golang/legacy/references/logging.md deleted file mode 100644 index b294ffbbe9..0000000000 --- a/website/content/en/docs/golang/legacy/references/logging.md +++ /dev/null @@ -1,279 +0,0 @@ ---- -title: Operator SDK Logging -linkTitle: Logging -weight: 3 ---- - -Operator SDK-generated operators use the [`logr`][godoc_logr] interface to log. This log interface has several backends such as [`zap`][repo_zapr], which the SDK uses in generated code by default. [`logr.Logger`][godoc_logr_logger] exposes [structured logging][site_struct_logging] methods that help create machine-readable logs and adding a wealth of information to log records. - -## Default zap logger - -Operator SDK uses a `zap`-based `logr` backend when scaffolding new projects. To assist with configuring and using this logger, the SDK includes several helper functions. - -In the simple example below, we add the zap flagset to the operator's command line flags with `zap.FlagSet()`, and then set the controller-runtime logger with `zap.Logger()`. 
- -By default, `zap.Logger()` will return a logger that is ready for production use. It uses a JSON encoder, logs starting at the `info` level, and has [sampling][zap_sampling] enabled. To customize the default behavior, users can use the zap flagset and specify flags on the command line. The zap flagset includes the following flags that can be used to configure the logger: - -* `--zap-devel` - Enables the zap development config (changes defaults to console encoder, debug log level, and disables sampling) (default: `false`) -* `--zap-encoder` string - Sets the zap log encoding (`json` or `console`) -* `--zap-level` string or integer - Sets the zap log level (`debug`, `info`, `error`, or an integer value greater than 0). If 4 or greater the verbosity of client-go will be set to this level. -* `--zap-sample` - Enables zap's sampling mode. Sampling will be disabled for integer log levels greater than 1. -* `--zap-stacktrace-level` - Set the minimum log level that triggers stacktrace generation (default: `error`) -* `--zap-time-encoding` string - Sets the zap time format (`epoch`, `millis`, `nano`, or `iso8601`) - -### A simple example - -Operators set the logger for all operator logging in [`cmd/manager/main.go`][code_set_logger]. 
To illustrate how this works, try out this simple example: - -```Go -package main - -import ( - "github.com/operator-framework/operator-sdk/pkg/log/zap" - "github.com/spf13/pflag" - logf "sigs.k8s.io/controller-runtime/pkg/log" -) - -var globalLog = logf.Log.WithName("global") - -func main() { - pflag.CommandLine.AddFlagSet(zap.FlagSet()) - pflag.Parse() - logf.SetLogger(zap.Logger()) - scopedLog := logf.Log.WithName("scoped") - - globalLog.Info("Printing at INFO level") - globalLog.V(1).Info("Printing at DEBUG level") - scopedLog.Info("Printing at INFO level") - scopedLog.V(1).Info("Printing at DEBUG level") -} -``` - -#### Output using the defaults -```console -$ go run main.go -{"level":"info","ts":1559866292.307987,"logger":"global","msg":"Printing at INFO level"} -{"level":"info","ts":1559866292.308039,"logger":"scoped","msg":"Printing at INFO level"} -``` - -#### Output overriding the log level to 1 (debug) -```console -$ go run main.go --zap-level=1 -{"level":"info","ts":1559866310.065048,"logger":"global","msg":"Printing at INFO level"} -{"level":"debug","ts":1559866310.0650969,"logger":"global","msg":"Printing at DEBUG level"} -{"level":"info","ts":1559866310.065119,"logger":"scoped","msg":"Printing at INFO level"} -{"level":"debug","ts":1559866310.065123,"logger":"scoped","msg":"Printing at DEBUG level"} -``` -## Custom zap logger - -In order to use a custom zap logger, [`zap`][controller_runtime_zap] from controller-runtime can be utilized to wrap it in a logr implementation. - -Below is an example illustrating the use of [`zap-logfmt`][logfmt_repo] in logging. - -### Example - -In your `main.go` file, replace the current implementation for logs inside the `main` function: - -```Go -... -// Add the zap logger flag set to the CLI. The flag set must -// be added before calling pflag.Parse(). -pflag.CommandLine.AddFlagSet(zap.FlagSet()) - -// Add flags registered by imported packages (e.g. 
glog and -// controller-runtime) -pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - -pflag.Parse() - -// Use a zap logr.Logger implementation. If none of the zap -// flags are configured (or if the zap flag set is not being -// used), this defaults to a production zap logger. -// The logger instantiated here can be changed to any logger -// implementing the logr.Logger interface. This logger will -// be propagated through the whole operator, generating -// uniform and structured logs. -logf.SetLogger(zap.Logger()) -... -``` - -With: - -```Go -configLog := zap.NewProductionEncoderConfig() -configLog.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) { - encoder.AppendString(ts.UTC().Format(time.RFC3339)) -} -logfmtEncoder := zaplogfmt.NewEncoder(configLog) - -// Construct a new logr.logger. -log = zapcr.New(zapcr.UseDevMode(true), zapcr.WriteTo(os.Stdout), zapcr.Encoder(logfmtEncoder)) - -// Set the controller logger to log, which will -// be propagated through the whole operator, generating -// uniform and structured logs. -logf.SetLogger(log) -``` - -Ensure that the following additional imports are being used: - -```Go -import( - ... - zaplogfmt "github.com/sykesm/zap-logfmt" - zapcr "sigs.k8s.io/controller-runtime/pkg/log/zap" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - ... -) -``` -**NOTE**: For this example, you will need to add the module `"github.com/sykesm/zap-logfmt"` to your project. Run `go get -u github.com/sykesm/zap-logfmt`. 
- -To test, the following print statement can be added in the main function: - -`log.Info("Printing at INFO LEVEL")` - -#### Output using custom zap logger - -```console -$ operator-sdk run local -ts=2020-02-27T23:10:33Z level=info msg="Printing at INFO level" -ts=2020-02-27T23:10:33Z level=info msg="Operator Version: 0.0.1" -ts=2020-02-27T23:10:33Z level=info msg="Go Version: go1.13.8" -ts=2020-02-27T23:10:33Z level=info msg="Go OS/Arch: darwin/amd64" -ts=2020-02-27T23:10:33Z level=info msg="Version of operator-sdk: v0.15.2" -``` - -By using `sigs.k8s.io/controller-runtime/pkg/log`, your logger is propagated through `controller-runtime`. Any logs produced by `controller-runtime` code will be through your logger, and therefore have the same formatting and destination. - -### Setting flags when running locally - -When running locally with `operator-sdk run local`, you can use the `--operator-flags` flag to pass additional flags to your operator, including the zap flags. For example: - -```console -$ operator-sdk run local --operator-flags="--zap-level=debug --zap-encoder=console"` -``` - -### Setting flags when deploying to a cluster - -When deploying your operator to a cluster you can set additional flags using an `args` array in your operator's `container` spec. 
For example: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: memcached-operator -spec: - replicas: 1 - selector: - matchLabels: - name: memcached-operator - template: - metadata: - labels: - name: memcached-operator - spec: - serviceAccountName: memcached-operator - containers: - - name: memcached-operator - # Replace this with the built image name - image: REPLACE_IMAGE - command: - - memcached-operator - args: - - "--zap-level=debug" - - "--zap-encoder=console" - imagePullPolicy: Always - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: OPERATOR_NAME - value: "memcached-operator" -``` - -## Creating a structured log statement - -There are two ways to create structured logs with `logr`. You can create new loggers using `log.WithValues(keyValues)` that include `keyValues`, a list of key-value pair `interface{}`'s, in each log record. Alternatively you can include `keyValues` directly in a log statement, as all `logr` log statements take some message and `keyValues`. The signature of `logr.Error()` has an `error`-type parameter, which can be `nil`. - -An example from [`memcached_controller.go`][code_memcached_controller]: - -```Go -package memcached - -import ( - logf "sigs.k8s.io/controller-runtime/pkg/log" -) - -// Set a global logger for the memcached package. Each log record produced -// by this logger will have an identifier containing "controller_memcached". -// These names are hierarchical; the name attached to memcached log statements -// will be "operator-sdk.controller_memcached" because SDKLog has name -// "operator-sdk". -var log = logf.Log.WithName("controller_memcached") - -func (r *ReconcileMemcached) Reconcile(request reconcile.Request) (reconcile.Result, error) { - // Create a logger for Reconcile() that includes "Request.Namespace" - // and "Request.Name" in each log record from this log statement. 
- reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) - reqLogger.Info("Reconciling Memcached.") - - memcached := &cachev1alpha1.Memcached{} - err := r.client.Get(context.TODO(), request.NamespacedName, memcached) - if err != nil { - if errors.IsNotFound(err) { - reqLogger.Info("Memcached resource not found. Ignoring since object must be deleted.") - return reconcile.Result{}, nil - } - return reconcile.Result{}, err - } - - found := &appsv1.Deployment{} - err = r.client.Get(context.TODO(), types.NamespacedName{Name: memcached.Name, Namespace: memcached.Namespace}, found) - if err != nil { - if errors.IsNotFound(err) { - dep := r.deploymentForMemcached(memcached) - // Include "Deployment.Namespace" and "Deployment.Name" in records - // produced by this particular log statement. "Request.Namespace" and - // "Request.Name" will also be included from reqLogger. - reqLogger.Info("Creating a new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) - err = r.client.Create(context.TODO(), dep) - if err != nil { - // Include the error in records produced by this log statement. - reqLogger.Error(err, "Failed to create new Deployment", "Deployment.Namespace", dep.Namespace, "Deployment.Name", dep.Name) - return reconcile.Result{}, err - } - } - return reconcile.Result{}, err - } - - ... 
-} -``` - -Log records will look like the following (from `reqLogger.Error()` above): - -``` -2018-11-08T00:00:25.700Z ERROR operator-sdk.controller_memcached pkg/controller/memcached/memcached_controller.go:118 Failed to create new Deployment {"Request.Namespace", "memcached", "Request.Name", "memcached-operator", "Deployment.Namespace", "memcached", "Deployment.Name", "memcached-operator"} -``` - -## Non-default logging - -If you do not want to use `logr` as your logging tool, you can remove `logr`-specific statements without issue from your operator's code, including the `logr` [setup code][code_set_logger] in `cmd/manager/main.go`, and add your own. Note that removing `logr` setup code will prevent `controller-runtime` from logging. - - -[godoc_logr]:https://godoc.org/github.com/go-logr/logr -[repo_zapr]:https://godoc.org/github.com/go-logr/zapr -[godoc_logr_logger]:https://godoc.org/github.com/go-logr/logr#Logger -[site_struct_logging]:https://www.client9.com/structured-logging-in-golang/ -[code_memcached_controller]:https://github.com/operator-framework/operator-sdk/blob/master/example/memcached-operator/memcached_controller.go.tmpl -[code_set_logger]:https://github.com/operator-framework/operator-sdk/blob/4d66be409a69d169aaa29d470242a1defbaf08bb/internal/pkg/scaffold/cmd.go#L92-L96 -[zap_sampling]:https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs -[logfmt_repo]:https://github.com/jsternberg/zap-logfmt -[controller_runtime_zap]:https://github.com/kubernetes-sigs/controller-runtime/tree/master/pkg/log/zap diff --git a/website/content/en/docs/golang/legacy/references/markers.md b/website/content/en/docs/golang/legacy/references/markers.md deleted file mode 100644 index 5a202ae05e..0000000000 --- a/website/content/en/docs/golang/legacy/references/markers.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -title: API Markers -linkTitle: API Markers -weight: 5 ---- - -This document describes [code markers][markers] supported by the SDK. 
- -## ClusterServiceVersion markers - -This section details ClusterServiceVersion (CSV) [code markers][code-markers-design] and lists available markers. - -**Note:** CSV markers can only be used in Go Operator projects. Annotations for Ansible and Helm Operator projects will be added in the future. - -## Usage - -All markers have a `+operator-sdk:gen-csv` prefix, denoting that they're parsed while executing -[`operator-sdk generate bundle`][cli-gen-bundle] or [`operator-sdk generate packagemanifests`][cli-gen-packagemanifests]. - -### Paths - -Paths are dot-separated string hierarchies with the above prefix that map to CSV [`spec`][csv-spec] field names. - -Example: `+operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Pod Count"` - -#### customresourcedefinitions - -- `customresourcedefinitions`: child path token - - `displayName`: quoted string or string literal - - `resources`: quoted string or string literal, in the format `"kind,version,\"name\""` or `` `kind,version,"name"` ``, where `kind`, `version`, and `name` are fields in each CSV `resources` entry - - `specDescriptors`, `statusDescriptors`: bool, or child path token - - `displayName`: quoted string or string literal - - `x-descriptors`: quoted string or string literal comma-separated list of [`x-descriptor`][csv-x-desc] UI hints. - -**NOTES** -- `specDescriptors` and `statusDescriptors` with a value of `true` is required for each field to be included in their respective `customresourcedefinitions` CSV fields. See the examples below. -- `customresourcedefinitions` top-level `kind`, `name`, and `version` fields are parsed from API code. -- All `description` fields are parsed from type declaration and `struct` type field comments. -- `path` is parsed out of a field's JSON tag and merged with parent field path's in dot-hierarchy notation. - -### Examples - -These examples assume `Memcached`, `MemcachedSpec`, and `MemcachedStatus` are the example projects' kind, spec, and status. 
- -1. Set a display name for a `customresourcedefinitions` kind entry: - -```go -// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Memcached App" -type Memcached struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MemcachedSpec `json:"spec,omitempty"` - Status MemcachedStatus `json:"status,omitempty"` -} -``` - -2. Set `displayName`, `path`, `x-descriptors`, and `description` on a field for a `customresourcedefinitions.specDescriptors` entry: - -```go -type MemcachedSpec struct { - // Size is the size of the memcached deployment. <-- This will become Size's specDescriptors.description. - // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true - // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Pod Count" - // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:podCount,urn:alm:descriptor:io.kubernetes:custom" - Size int32 `json:"size"` // <-- Size's specDescriptors.path is inferred from this JSON tag. -} -``` - -3. Let the SDK infer all unmarked paths on a field for a `customresourcedefinitions.specDescriptors` entry: - -```go -type MemcachedSpec struct { - // Size is the size of the memcached deployment. - // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true - Size int32 `json:"size"` -} -``` - -The SDK uses the `Size` fields' `json` tag name as `path`, `Size` as `displayName`, and field comments as `description`. - -The SDK also checks `path` elements against a list of well-known path to x-descriptor string mappings and either uses a match as `x-descriptors`, or does not set `x-descriptors`. 
Supported mappings: - -#### Spec x-descriptors - -{{}} -| Path | x-descriptor | -|-----|-----| -| `size` | `urn:alm:descriptor:com.tectonic.ui:podCount` | -| `podCount` | `urn:alm:descriptor:com.tectonic.ui:podCount` | -| `endpoints` | `urn:alm:descriptor:com.tectonic.ui:endpointList` | -| `endpointList` | `urn:alm:descriptor:com.tectonic.ui:endpointList` | -| `label` | `urn:alm:descriptor:com.tectonic.ui:label` | -| `resources` | `urn:alm:descriptor:com.tectonic.ui:resourceRequirements` | -| `resourceRequirements` | `urn:alm:descriptor:com.tectonic.ui:resourceRequirements` | -| `selector` | `urn:alm:descriptor:com.tectonic.ui:selector:` | -| `namespaceSelector` | `urn:alm:descriptor:com.tectonic.ui:namespaceSelector` | -| none | `urn:alm:descriptor:io.kubernetes:` | -| `booleanSwitch` | `urn:alm:descriptor:com.tectonic.ui:booleanSwitch` | -| `password` | `urn:alm:descriptor:com.tectonic.ui:password` | -| `checkbox` | `urn:alm:descriptor:com.tectonic.ui:checkbox` | -| `imagePullPolicy` | `urn:alm:descriptor:com.tectonic.ui:imagePullPolicy` | -| `updateStrategy` | `urn:alm:descriptor:com.tectonic.ui:updateStrategy` | -| `text` | `urn:alm:descriptor:com.tectonic.ui:text` | -| `number` | `urn:alm:descriptor:com.tectonic.ui:number` | -| `nodeAffinity` | `urn:alm:descriptor:com.tectonic.ui:nodeAffinity` | -| `podAffinity` | `urn:alm:descriptor:com.tectonic.ui:podAffinity` | -| `podAntiAffinity` | `urn:alm:descriptor:com.tectonic.ui:podAntiAffinity` | -| none | `urn:alm:descriptor:com.tectonic.ui:fieldGroup:` | -| none | `urn:alm:descriptor:com.tectonic.ui:arrayFieldGroup:` | -| none | `urn:alm:descriptor:com.tectonic.ui:select:` | -| `advanced` | `urn:alm:descriptor:com.tectonic.ui:advanced` | -{{
}} - -#### Status x-descriptors - -{{}} -| Path | x-descriptor | -|-----|-----| -| `podStatuses` | `urn:alm:descriptor:com.tectonic.ui:podStatuses` | -| `size` | `urn:alm:descriptor:com.tectonic.ui:podCount` | -| `podCount` | `urn:alm:descriptor:com.tectonic.ui:podCount` | -| `link` | `urn:alm:descriptor:org.w3:link` | -| `w3link` | `urn:alm:descriptor:org.w3:link` | -| `conditions` | `urn:alm:descriptor:io.kubernetes.conditions` | -| `text` | `urn:alm:descriptor:text` | -| `prometheusEndpoint` | `urn:alm:descriptor:prometheusEndpoint` | -| `phase` | `urn:alm:descriptor:io.kubernetes.phase` | -| `k8sPhase` | `urn:alm:descriptor:io.kubernetes.phase` | -| `reason` | `urn:alm:descriptor:io.kubernetes.phase:reason` | -| `k8sReason` | `urn:alm:descriptor:io.kubernetes.phase:reason` | -| none | `urn:alm:descriptor:io.kubernetes:` | -{{
}} - -**NOTE:** any x-descriptor that ends in `:` will not be inferred by `path` element, ex. `urn:alm:descriptor:io.kubernetes:`. Use the `x-descriptors` marker if you want to enable one for your type. - -4. A comprehensive example: -- Infer `path`, `description`, `displayName`, and `x-descriptors` for `specDescriptors` and `statusDescriptors` entries. -- Create three `resources` entries each with `kind`, `version`, and `name` values. - -```go -// Represents a cluster of Memcached apps -// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Memcached App" -// +operator-sdk:gen-csv:customresourcedefinitions.resources="Deployment,v1,\"memcached-operator\"" -// +operator-sdk:gen-csv:customresourcedefinitions.resources=`Service,v1,"memcached-operator"` -type Memcached struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MemcachedSpec `json:"spec,omitempty"` - Status MemcachedStatus `json:"status,omitempty"` -} - -type MemcachedSpec struct { - Pods MemcachedPods `json:"pods"` -} - -type MemcachedStatus struct { - Pods MemcachedPods `json:"podStatuses"` -} - -type MemcachedPods struct { - // Size is the size of the memcached deployment. - // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true - // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true - Size int32 `json:"size"` -} -``` - -The generated `customresourcedefinitions` will look like: - -```yaml -customresourcedefinitions: - owned: - - description: Represents a cluster of Memcached apps - displayName: Memcached App - kind: Memcached - name: memcacheds.cache.example.com - version: v1alpha1 - resources: - - kind: Deployment - name: A Kubernetes Deployment - version: v1 - - kind: ReplicaSet - name: A Kubernetes ReplicaSet - version: v1beta2 - - kind: Pod - name: A Kubernetes Pod - version: v1 - specDescriptors: - - description: The desired number of member Pods for the deployment. 
- displayName: Size - path: pods.size - x-descriptors: - - 'urn:alm:descriptor:com.tectonic.ui:podCount' - statusDescriptors: - - description: The desired number of member Pods for the deployment. - displayName: Size - path: podStatuses.size - x-descriptors: - - 'urn:alm:descriptor:com.tectonic.ui:podStatuses' - - 'urn:alm:descriptor:com.tectonic.ui:podCount' -``` - -[markers]:https://pkg.go.dev/sigs.k8s.io/controller-tools/pkg/markers -[code-markers-design]:https://github.com/operator-framework/operator-sdk/blob/master/proposals/sdk-code-annotations.md -[cli-gen-bundle]:/docs/cli/operator-sdk_generate_bundle -[cli-gen-packagemanifests]:/docs/cli/operator-sdk_generate_packagemanifests -[csv-x-desc]:https://github.com/openshift/console/blob/feabd61/frontend/packages/operator-lifecycle-manager/src/components/descriptors/types.ts#L3-L39 -[csv-spec]:https://github.com/operator-framework/operator-lifecycle-manager/blob/e0eea22/doc/design/building-your-csv.md diff --git a/website/content/en/docs/golang/legacy/references/project-layout.md b/website/content/en/docs/golang/legacy/references/project-layout.md deleted file mode 100644 index f08859737f..0000000000 --- a/website/content/en/docs/golang/legacy/references/project-layout.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -title: Project Scaffolding Layout for Operator SDK -linkTitle: Project Layout -weight: 4 ---- - -The `operator-sdk` CLI generates a number of packages for each project. The following table describes a basic rundown of each generated file/directory. - - -| File/Folders | Purpose | -| :--- | :--- | -| cmd | Contains `manager/main.go` which is the main program of the operator. This instantiates a new manager which registers all custom resource definitions under `pkg/apis/...` and starts all controllers under `pkg/controllers/...` . | -| pkg/apis | Contains the directory tree that defines the APIs of the Custom Resource Definitions(CRD). 
Users are expected to edit the `pkg/apis/<group>/<version>/<kind>_types.go` files to define the API for each resource type and import these packages in their controllers to watch for these resource types.|
-| pkg/controller | This pkg contains the controller implementations. Users are expected to edit the `pkg/controller/<kind>/<kind>_controller.go` to define the controller's reconcile logic for handling a resource type of the specified `kind`. |
-| build | Contains the `Dockerfile` and build scripts used to build the operator. |
-| deploy | Contains various YAML manifests for registering CRDs, setting up [RBAC][RBAC], and deploying the operator as a Deployment.
-| go.mod go.sum | The [Go mod][go_mod] manifests that describe the external dependencies of this operator. |
-| vendor | The golang [vendor][Vendor] directory that contains local copies of external dependencies that satisfy Go imports in this project. [Go modules][go_mod] manages the vendor directory directly. This directory will not exist unless the project is initialized with the `--vendor` flag, or `go mod vendor` is run in the project root. |
-| version | Contains version information of the operator.
-[RBAC]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/
-[Vendor]: https://golang.org/cmd/go/#hdr-Vendor_Directories
-[go_mod]: https://github.com/golang/go/wiki/Modules
diff --git a/website/content/en/docs/golang/legacy/unit-testing.md b/website/content/en/docs/golang/legacy/unit-testing.md
deleted file mode 100644
index 3b3db9d5a4..0000000000
--- a/website/content/en/docs/golang/legacy/unit-testing.md
+++ /dev/null
@@ -1,262 +0,0 @@
----
-title: Unit testing with Operator SDK
-linkTitle: Unit Testing
-weight: 10
----
-
-## Overview
-
-Testing your operator should involve both unit and [end-to-end][doc-e2e-test] tests. Unit tests assess the expected outcomes of individual operator components without requiring coordination between components.
Operator unit tests should test multiple scenarios likely to be encountered by your custom operator logic at runtime. Much of your custom logic will involve API server calls via a [client][doc-client]; `Reconcile()` in particular will be making API calls on each reconciliation loop. These API calls can be mocked by using `controller-runtime`'s [fake client][doc-cr-fake-client], perfect for unit testing. This document steps through writing a unit test for the [memcached-operator][repo-memcached-reconcile]'s `Reconcile()` method using a fake client. - -## Using a Fake client - -The `controller-runtime`'s fake client exposes the same set of operations as a typical client, but simply tracks objects rather than sending requests over a network. You can create a new fake client that tracks an initial set of objects with the following code: - -```Go -import ( - "context" - "testing" - - cachev1alpha1 "github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestMemcachedController(t *testing.T) { - ... - // A Memcached object with metadata and spec. - memcached := &cachev1alpha1.Memcached{ - ObjectMeta: metav1.ObjectMeta{ - Name: "memcached", - Namespace: "memcached-operator", - Labels: map[string]string{ - "label-key": "label-value", - }, - }, - } - - // Objects to track in the fake client. - objs := []runtime.Object{memcached} - - // Create a fake client to mock API calls. - cl := fake.NewFakeClient(objs...) - - // List Memcached objects filtering by labels - opt := client.MatchingLabels(map[string]string{"label-key": "label-value"}) - memcachedList := &cachev1alpha1.MemcachedList{} - err := cl.List(context.TODO(), memcachedList, opt) - if err != nil { - t.Fatalf("list memcached: (%v)", err) - } - ... 
-} -``` -The fake client `cl` will cache `memcached` in an internal object tracker so that CRUD operations via `cl` can be performed on it. - -## Testing Reconcile - -[`Reconcile()`][doc-reconcile] performs most API server calls a particular operator controller will make. `ReconcileMemcached.Reconcile()` will ensure the `Memcached` resource exists as well as reconcile the state of owned Deployments and Pods. We can test runtime reconciliation scenarios using the above client. The following is an example that tests if `Reconcile()` creates a deployment if one is not found, and whether the created deployment is correct: - -```Go -import ( - "context" - "testing" - - cachev1alpha1 "github.com/example-inc/memcached-operator/pkg/apis/cache/v1alpha1" - - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - logf "sigs.k8s.io/controller-runtime/pkg/log" -) - -func TestMemcachedControllerDeploymentCreate(t *testing.T) { - var ( - name = "memcached-operator" - namespace = "memcached" - replicas int32 = 3 - ) - // A Memcached object with metadata and spec. - memcached := &cachev1alpha1.Memcached{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: cachev1alpha1.MemcachedSpec{ - Size: replicas, // Set desired number of Memcached replicas. - }, - } - - // Objects to track in the fake client. - objs := []runtime.Object{ memcached } - - // Register operator types with the runtime scheme. - s := scheme.Scheme - s.AddKnownTypes(cachev1alpha1.SchemeGroupVersion, memcached) - - // Create a fake client to mock API calls. - cl := fake.NewFakeClient(objs...) - - // Create a ReconcileMemcached object with the scheme and fake client. 
- r := &ReconcileMemcached{client: cl, scheme: s} - - // Mock request to simulate Reconcile() being called on an event for a - // watched resource . - req := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: name, - Namespace: namespace, - }, - } - res, err := r.Reconcile(req) - if err != nil { - t.Fatalf("reconcile: (%v)", err) - } - // Check the result of reconciliation to make sure it has the desired state. - if !res.Requeue { - t.Error("reconcile did not requeue request as expected") - } - // Check if deployment has been created and has the correct size. - dep := &appsv1.Deployment{} - err = r.client.Get(context.TODO(), req.NamespacedName, dep) - if err != nil { - t.Fatalf("get deployment: (%v)", err) - } - // Check if the quantity of Replicas for this deployment is equals the specification - dsize := *dep.Spec.Replicas - if dsize != replicas { - t.Errorf("dep size (%d) is not the expected size (%d)", dsize, replicas) - } -} -``` - -**The above tests check if:** - -- `Reconcile()` fails to find a Deployment object -- A Deployment is created -- The request is requeued in the expected manner -- The number of replicas in the created Deployment's spec is as expected. - -**NOTE**: A unit test checking more cases can be found in our [`samples repo`][code-test-example]. - -## Testing with 3rd Party Resources - -You may have added third-party resources in your operator as described in the [`Advanced Topics section of the user guide`][user-guide]. In order to create a unit-test to test these kinds of resources, it might be necessary to update the Scheme with the third-party resources and pass it to your Reconciler. -The following code snippet is an example that adds the [`v1.Route`][ocp-doc-v1-route] OpenShift scheme to the ReconcileMemcached reconciler's scheme. - -```go - -import ( - ... - routev1 "github.com/openshift/api/route/v1" - ... 
-
)
-
-// TestMemcachedController runs ReconcileMemcached.Reconcile() against a
-// fake client that tracks a Memcached object.
-func TestMemcachedController(t *testing.T) {
-    ...
-    // Register operator types with the runtime scheme.
-    s := scheme.Scheme
-
-    // Add route Openshift scheme
-    if err := routev1.AddToScheme(s); err != nil {
-        t.Fatalf("Unable to add route scheme: (%v)", err)
-    }
-
-    // Create the mock for the Route
-    // NOTE: If the object will be created by the reconcile you do not need to add a mock for it
-    route := &routev1.Route{
-        ObjectMeta: v1.ObjectMeta{
-            Name:      name,
-            Namespace: namespace,
-            Labels:    getAppLabels(name),
-        },
-    }
-
-    s.AddKnownTypes(appv1alpha1.SchemeGroupVersion, memcached)
-
-    // Create a fake client to mock API calls.
-    cl := fake.NewFakeClient(objs...)
-
-    // Create a ReconcileMemcached object with the scheme and fake client.
-    r := &ReconcileMemcached{client: cl, scheme: s}
-    ...
-}
-```
-
-**NOTE:** If your Reconcile does not have the scheme attribute, you may create the fake client as `cl := fake.NewFakeClientWithScheme(s, objs...)` in order to add the scheme.
-
-In this way, you will be able to get the mock object injected into the Reconcile, as in the following example.
-
-```go
-    route := &routev1.Route{}
-    err = r.client.Get(context.TODO(), types.NamespacedName{Name: name, Namespace: namespace}, route)
-    if err != nil {
-        t.Fatalf("get route: (%v)", err)
-    }
-```
-
-**NOTE:** The following is an example of an issue that can be faced because an invalid `TypeMeta.APIVersion` was informed. It is not recommended to declare the `TypeMeta`, since it will be implicitly generated.
-
-```shell
-get route: (no kind "Route" is registered for version "v1" in scheme "k8s.io/client-go/kubernetes/scheme/register.go:61")
-```
-
-The following is an example which could cause this error.
-
-```go
-    ...
- route := &routev1.Route{ - TypeMeta: v1.TypeMeta{ // TODO (user): Remove the TypeMeta declared - APIVersion: "v1", // the correct value will be `"route.openshift.io/v1"` - Kind: "Route", - }, - ObjectMeta: v1.ObjectMeta{ - Name: name, - Namespace: namespace, - Labels: ls, - }, - } - ... -``` - -Following another example of the issue that can be faced when the third-party resource schema was not added properly. - -```shell -create a route: (no kind is registered for the type v1.Route in scheme "k8s.io/client-go/kubernetes/scheme/register.go:61")` -``` - -## How to increase the verbosity of the logs? - -Following is a snippet code as an example to increase the verbosity of the logs in order to better troubleshoot your tests. - -```go -import ( - ... - logf "sigs.k8s.io/controller-runtime/pkg/log" - ... -) -func TestMemcachedController(t *testing.T) { - //dev logs - logf.SetLogger(logf.ZapLogger(true)) - ... -} -``` - - - - -[doc-e2e-test]: ../e2e-tests -[doc-client]:/docs/golang/legacy/references/client/ -[doc-cr-fake-client]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/client/fake -[repo-memcached-reconcile]: https://github.com/operator-framework/operator-sdk-samples/blob/4c6934448684a6953ece4d3d9f3f77494b1c125e/memcached-operator/pkg/controller/memcached/memcached_controller.go#L82 -[doc-reconcile]: https://godoc.org/sigs.k8s.io/controller-runtime/pkg/reconcile#Reconciler -[code-test-example]: https://github.com/operator-framework/operator-sdk-samples/blob/master/go/memcached-operator/pkg/controller/memcached/memcached_controller_test.go#L25 -[user-guide]:/docs/golang/legacy/quickstart/#register-with-the-managers-scheme -[ocp-doc-v1-route]: https://docs.openshift.com/container-platform/3.11/rest_api/apis-route.openshift.io/v1.Route.html diff --git a/website/content/en/docs/golang/quickstart.md b/website/content/en/docs/golang/quickstart.md index 529f5e8302..f72ecd21af 100644 --- a/website/content/en/docs/golang/quickstart.md +++ 
b/website/content/en/docs/golang/quickstart.md @@ -461,7 +461,6 @@ Also see the [advanced topics][advanced_topics] doc for more use cases and under [controller_options]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/controller#Options [controller_godocs]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/controller [operator_scope]:/docs/golang/operator-scope/ -[memcached_handler]: ../example/memcached-operator/handler.go.tmpl [kubebuilder_layout_doc]:https://book.kubebuilder.io/cronjob-tutorial/basic-project.html [homebrew_tool]:https://brew.sh/ [go_mod_wiki]: https://github.com/golang/go/wiki/Modules @@ -484,13 +483,13 @@ Also see the [advanced topics][advanced_topics] doc for more use cases and under [markers]: https://book.kubebuilder.io/reference/markers.html [crd-markers]: https://book.kubebuilder.io/reference/markers/crd-validation.html [rbac-markers]: https://book.kubebuilder.io/reference/markers/rbac.html -[memcached_controller]: https://github.com/operator-framework/operator-sdk/blob/master/example/kb-memcached-operator/memcached_controller.go.tmpl +[memcached_controller]: https://github.com/operator-framework/operator-sdk/blob/master/example/memcached-operator/memcached_controller.go.tmpl [builder_godocs]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/builder#example-Builder -[legacy_quickstart_doc]:/docs/golang/legacy/quickstart/ +[legacy_quickstart_doc]:https://v0-19-x.sdk.operatorframework.io/docs/golang/legacy/quickstart/ [activate_modules]: https://github.com/golang/go/wiki/Modules#how-to-install-and-activate-module-support [advanced_topics]: /docs/golang/advanced-topics/ [create_a_webhook]: /docs/golang/webhooks/ [status_marker]: https://book.kubebuilder.io/reference/generating-crd.html#status [status_subresource]: https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#status-subresource 
[API-groups]:https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups -[legacy_CLI]:/docs/cli +[legacy_CLI]:https://v0-19-x.sdk.operatorframework.io/docs/cli/ diff --git a/website/content/en/docs/helm/quickstart.md b/website/content/en/docs/helm/quickstart.md index 99fefc9968..63e149778d 100644 --- a/website/content/en/docs/helm/quickstart.md +++ b/website/content/en/docs/helm/quickstart.md @@ -307,11 +307,11 @@ kubectl delete -f deploy/crds/example.com_nginxes_crd.yaml **NOTE** Additional CR/CRD's can be added to the project by running, for example, the command :`operator-sdk add api --api-version=cache.example.com/v1alpha1 --kind=AppService` For more information, refer [cli][addcli] doc. -[operator-scope]: /docs/legacy-common/operator-scope +[operator-scope]:https://v0-19-x.sdk.operatorframework.io/docs/legacy-common/operator-scope/ [layout-doc]: /docs/helm/reference/scaffolding [helm-charts]:https://helm.sh/docs/topics/charts/ [helm-values]:https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing [helm-official]:https://helm.sh/docs/ [addcli]: /docs/cli/operator-sdk_add_api -[quickstart-bundle]: /docs/olm-integration/legacy/quickstart-bundle +[quickstart-bundle]:https://v0-19-x.sdk.operatorframework.io/docs/olm-integration/legacy/quickstart-bundle/ diff --git a/website/content/en/docs/legacy-common/crds-scope.md b/website/content/en/docs/legacy-common/crds-scope.md deleted file mode 100644 index b7affbbbf4..0000000000 --- a/website/content/en/docs/legacy-common/crds-scope.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: CRD scope with Operator SDK -linkTitle: CRD Scope -weight: 60 ---- - -## Overview - -The CustomResourceDefinition (CRD) scope can also be changed for cluster-scoped operators so that there is only a single -instance (for a given name) of the CRD to manage across the cluster. - -**NOTE**: Cluster-scoped CRDs are **NOT** supported with the Helm operator. 
While Helm releases can create
-cluster-scoped resources, Helm's design requires the release itself to be created in a specific namespace. Since the
-Helm operator uses a 1-to-1 mapping between a CR and a Helm release, Helm's namespace-scoped release requirement
-extends to Helm operator's namespace-scoped CR requirement.
-
-For each CRD that needs to be cluster-scoped, update its manifest to be cluster-scoped.
-
-* `deploy/crds/<group>_<version>_<kind>_crd.yaml`
-  * Set `spec.scope: Cluster`
-
-To ensure that the CRD is always generated with `scope: Cluster`, add the marker
-`// +kubebuilder:resource:path=<resource>,scope=Cluster`, or if already present replace `scope={Namespaced -> Cluster}`,
-above the CRD's Go type definition in `pkg/apis/<group>/<version>/<kind>_types.go`. Note that the `<resource>`
-element must be the same lower-case plural value of the CRD's Kind, `spec.names.plural`.
-
-## CRD cluster-scoped usage
-
-A cluster scope is ideal for operators that manage custom resources (CR's) that can be created in more than one namespace in a cluster.
-
-**NOTE**: When a `Manager` instance is created in the `main.go` file, it receives the namespace(s) as Options.
-These namespace(s) should be watched and cached for the Client which is provided by the Controllers. Only clients
-provided by cluster-scoped projects where the `Namespace` attribute is `""` will be able to manage cluster-scoped CRD's.
-For more information see the [Manager][manager_user_guide] topic in the user guide and the
-[Manager Options][manager_options].
-
-## Example for changing the CRD scope from Namespaced to Cluster
-
-The following example is for Go based-operators. `scope: Cluster` must be set manually for Helm and Ansible based-operators.
- -- Check the `spec.names.plural` in the CRD's Kind YAML file - -* `deploy/crds/cache_v1alpha1_memcached_crd.yaml` - ```YAML - apiVersion: apiextensions.k8s.io/v1beta1 - kind: CustomResourceDefinition - metadata: - name: memcacheds.cache.example.com - spec: - group: cache.example.com - names: - kind: Memcached - listKind: MemcachedList - plural: memcacheds - singular: memcached - scope: Namespaced - ``` - -- Update the `pkg/apis///_types.go` by adding the -marker `// +kubebuilder:resource:path=,scope=Cluster` - -* `pkg/apis/cache/v1alpha1/memcached_types.go` - ```Go - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - - // Memcached is the Schema for the memcacheds API - // +kubebuilder:resource:path=memcacheds,scope=Cluster - type Memcached struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec MemcachedSpec `json:"spec,omitempty"` - Status MemcachedStatus `json:"status,omitempty"` - } - ``` -- Execute the command `operator-sdk generate crds`, then you should be able to check that the CRD was updated with the cluster scope as in the following example: - -* `deploy/crds/cache.example.com_memcacheds_crd.yaml` - ```YAML - apiVersion: apiextensions.k8s.io/v1beta1 - kind: CustomResourceDefinition - metadata: - name: memcacheds.cache.example.com - spec: - group: cache.example.com - ... 
- scope: Cluster - ``` - -[RBAC]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ -[manager_user_guide]:/docs/golang/legacy/quickstart/#manager -[manager_options]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#Options diff --git a/website/content/en/docs/legacy-common/operator-scope.md b/website/content/en/docs/legacy-common/operator-scope.md deleted file mode 100644 index 6b75fd9940..0000000000 --- a/website/content/en/docs/legacy-common/operator-scope.md +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Operators and CRD scope with Operator SDK -linkTitle: Operator Scope -weight: 50 ---- - -## Overview - -A namespace-scoped operator watches and manages resources in a single namespace, whereas a cluster-scoped operator watches and manages resources cluster-wide. Namespace-scoped operators are preferred because of their flexibility. They enable decoupled upgrades, namespace isolation for failures and monitoring, and differing API definitions. - -However, there are use cases where a cluster-scoped operator may make sense. For example, the [cert-manager](https://github.com/jetstack/cert-manager) operator is often deployed with cluster-scoped permissions and watches so that it can manage issuing certificates for an entire cluster. - -**NOTE**: CustomResourceDefinition (CRD) scope can also be changed to cluster-scoped. See the [CRD scope][crd-scope-doc] document for more details. - -## Namespace-scoped operator usage - -This scope is ideal for operator projects which will control resources just in one namespace, which is where the operator is deployed. - -**NOTE:** Projects created by `operator-sdk` are namespace-scoped by default which means that they will NOT have a `ClusterRole` defined in `deploy/`. - -## Cluster-scoped operator usage - -This scope is ideal for operator projects which will control resources in more than one namespace. 
- -### Changes required for a cluster-scoped operator - -The SDK scaffolds operators to be namespaced by default but with a few modifications to the default manifests the operator can be run as cluster-scoped. - -* `deploy/operator.yaml`: - * Set `WATCH_NAMESPACE=""` to watch all namespaces instead of setting it to the pod's namespace - * Set `metadata.namespace` to define the namespace where the operator will be deployed. -* `deploy/role.yaml`: - * Use `ClusterRole` instead of `Role` -* `deploy/role_binding.yaml`: - * Use `ClusterRoleBinding` instead of `RoleBinding` - * Use `ClusterRole` instead of `Role` for `roleRef` - * Set the subject namespace to the namespace in which the operator is deployed. -* `deploy/service_account.yaml`: - * Set `metadata.namespace` to the namespace where the operator is deployed. - - -### Example for cluster-scoped operator - -With the above changes the specified manifests should look as follows: - -* `deploy/operator.yaml`: - ```YAML - apiVersion: apps/v1 - kind: Deployment - metadata: - name: memcached-operator - namespace: - ... - spec: - ... - template: - ... - spec: - ... - serviceAccountName: memcached-operator - containers: - - name: memcached-operator - ... - env: - - name: WATCH_NAMESPACE - value: "" - ``` -* `deploy/role.yaml`: - ```YAML - apiVersion: rbac.authorization.k8s.io/v1 - kind: ClusterRole - metadata: - name: memcached-operator - ... 
- ``` -* `deploy/role_binding.yaml`: - ```YAML - kind: ClusterRoleBinding - apiVersion: rbac.authorization.k8s.io/v1 - metadata: - name: memcached-operator - subjects: - - kind: ServiceAccount - name: memcached-operator - namespace: - roleRef: - kind: ClusterRole - name: memcached-operator - apiGroup: rbac.authorization.k8s.io -* `deploy/service_account.yaml` - ```YAML - apiVersion: v1 - kind: ServiceAccount - metadata: - name: memcached-operator - namespace: - ``` - -[RBAC]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ -[manager_user_guide]:/docs/golang/legacy/quickstart/#manager -[manager_options]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager#Options -[crd-scope-doc]: /docs/legacy-common/crds-scope \ No newline at end of file diff --git a/website/content/en/docs/migration/v0.1.0-migration-guide.md b/website/content/en/docs/migration/v0.1.0-migration-guide.md index 2f9948d137..edc5a140b5 100644 --- a/website/content/en/docs/migration/v0.1.0-migration-guide.md +++ b/website/content/en/docs/migration/v0.1.0-migration-guide.md @@ -309,7 +309,7 @@ At this point you should be able to build and run your operator to verify that i [controller-go-doc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg#hdr-Controller [request-go-doc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Request [result-go-doc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/reconcile#Result -[client-api-doc]: ../../golang/legacy/references/client +[client-api-doc]: https://v0-19-x.sdk.operatorframework.io/docs/golang/references/client/ [manager-go-doc]: https://godoc.org/github.com/kubernetes-sigs/controller-runtime/pkg/manager -[register-3rd-party-resources]: ../../golang/legacy/quickstart#adding-3rd-party-resources-to-your-operator -[user-guide-build-run]: ../../golang/legacy/quickstart#build-and-run-the-operator +[register-3rd-party-resources]: 
https://v0-19-x.sdk.operatorframework.io/docs/golang/legacy/quickstart/#adding-3rd-party-resources-to-your-operator +[user-guide-build-run]: https://v0-19-x.sdk.operatorframework.io/docs/golang/legacy/quickstart/#build-and-run-the-operator diff --git a/website/content/en/docs/migration/version-upgrade-guide.md b/website/content/en/docs/migration/version-upgrade-guide.md index 5de1dfe460..7639983a56 100644 --- a/website/content/en/docs/migration/version-upgrade-guide.md +++ b/website/content/en/docs/migration/version-upgrade-guide.md @@ -1365,12 +1365,12 @@ first `COPY` from `COPY /*.yaml manifests/` to `COPY deploy/olm-catalog/` directory. This command handles generating both manifests and metadata. -- [`bundle validate`][cli-bundle-validate]: validates an Operator bundle image or unpacked manifests and metadata. - -##### Package Manifests - -- [`generate packagemanifests`][cli-gen-packagemanifests]: creates a new or updates an existing versioned -directory as part of the package manifests in the `deploy/olm-catalog/` directory. -- [`run packagemanifests`][doc-testing-deployment]: runs an Operator's package manifests format -with an existing OLM installation. 
- - -[bundle]:https://github.com/operator-framework/operator-registry/blob/v1.12.6/docs/design/operator-bundle.md -[package-manifests]:https://github.com/operator-framework/operator-registry/tree/v1.5.3#manifest-format -[doc-olm-generate]:/docs/olm-integration/legacy/generating-a-csv -[cli-olm-install]:/docs/cli/operator-sdk_olm_install -[cli-olm-status]:/docs/cli/operator-sdk_olm_status -[cli-olm-uninstall]:/docs/cli/operator-sdk_olm_uninstall -[cli-gen-bundle]:/docs/cli/operator-sdk_generate_bundle -[cli-gen-packagemanifests]:/docs/cli/operator-sdk_generate_packagemanifests -[cli-bundle-validate]:/docs/cli/operator-sdk_bundle_validate -[doc-testing-deployment]:/docs/olm-integration/legacy/testing-deployment diff --git a/website/content/en/docs/olm-integration/legacy/generating-a-csv.md b/website/content/en/docs/olm-integration/legacy/generating-a-csv.md deleted file mode 100644 index 0994379bf2..0000000000 --- a/website/content/en/docs/olm-integration/legacy/generating-a-csv.md +++ /dev/null @@ -1,210 +0,0 @@ ---- -title: Generating Manifests and Metadata -linkTitle: Generating Manifests and Metadata -weight: 20 ---- - -This document describes how to manage packaging and shipping your Operator in the following stages: - -* **Generate your first release** - encapsulate the metadata needed to install your Operator with the -[Operator Lifecycle Manager][olm] and configure the permissions it needs from the generated SDK files. -* **Update your Operator** - apply any updates to Operator manifests made during development. -* **Upgrade your Operator** - carry over any customizations you have made and ensure a rolling update to the -next version of your Operator. - -## Overview - -Several `operator-sdk` subcommands manage operator-framework manifests, in particular [`ClusterServiceVersion`'s (CSVs)][doc-csv], -for an Operator: [`generate bundle`][cli-gen-bundle] and [`generate packagemanifests`][cli-gen-packagemanifests]. 
-See this [CLI overview][cli-overview] for details on each command. - -### ClusterServiceVersion manifests - -CSVs are manifests that define all aspects of an Operator, from what CustomResourceDefinitions (CRDs) it uses to -metadata describing the Operator's maintainers. They are typically versioned by semver, much like Operator projects -themselves; this version is present in both their `metadata.name` and `spec.version` fields. The CSV generator called -by `generate ` requires certain input manifests to construct a CSV manifest; -all inputs are read when either command is invoked, along with a [base](#generate-your-first-release) CSV, -to idempotently regenerate a CSV. - -The following resource kinds are typically included in a CSV: - - `Role`: define Operator permissions within a namespace. - - `ClusterRole`: define cluster-wide Operator permissions. - - `Deployment`: define how the Operator's operand is run in pods. - - `CustomResourceDefinition`: definitions of custom objects your Operator reconciles. - - Custom resource examples: examples of objects adhering to the spec of a particular CRD. - -**For Go Operators only:** these commands parse [CSV markers][csv-markers] from API type definitions, located -in `./pkg/apis`, to populate certain CSV fields. You can set an alternative path to the API types -root directory with `--apis-dir`. These markers are not available to Ansible or Helm project types. - -## Generate your first release - -You've recently run `operator-sdk new` and created your APIs with `operator-sdk add api`. Now you'd like to -package your Operator for deployment by OLM. Your Operator is at version `v0.0.1`. - -**Note:** you must set `--version=` when running either `generate ` for the first -time, and every time when running `generate packagemanifests`. - -### Bundle format - -A [bundle][bundle] consists of manifests (CSV and CRDs) and metadata that define an Operator -at a particular version. You may have also heard of a bundle image. 
From the bundle docs: - -> An Operator Bundle is built as a scratch (non-runnable) container image that -> contains operator manifests and specific metadata in designated directories -> inside the image. Then, it can be pushed and pulled from an OCI-compliant -> container registry. Ultimately, an operator bundle will be used by Operator -> Registry and OLM to install an operator in OLM-enabled clusters. - -At this stage in your Operator's development, we only need to worry about generating bundle files; -bundle images become important once you're ready to [publish][operatorhub] your Operator. - -By default `generate bundle` will generate a CSV, copy CRDs, and generate metadata in the bundle format: - -```console -$ operator-sdk generate bundle --version 0.0.1 -$ tree ./deploy/olm-catalog/test-operator -./deploy/olm-catalog/test-operator -├── manifests -│   ├── cache.my.domain_memcacheds.yaml -│   └── memcached-operator.clusterserviceversion.yaml -└── metadata - └── annotations.yaml -``` - -Bundle metadata in `deploy/olm-catalog//metadata/annotations.yaml` contains information about a particular Operator version -available in a registry. OLM uses this information to install specific Operator versions and resolve dependencies. -That file and `bundle.Dockerfile` contain the same [annotations][bundle-metadata], the latter as `LABEL`s, -which do not need to be modified in most cases; if you do decide to modify them, both sets of annotations _must_ -be the same to ensure consistent Operator deployment. - -##### Channels - -Metadata for each bundle contains channel information as well: - -> Channels allow package authors to write different upgrade paths for different users (e.g. beta vs. stable). - -Channels become important when publishing, but we should still be aware of them beforehand as they're required -values in our metadata. `generate bundle` writes the channel `alpha` by default. 
- -### Package manifests format - -A [package manifests][package-manifests] format consists of on-disk manifests (CSV and CRDs) and metadata that -define an Operator at all versions of that Operator. Each version is contained in its own directory, with a parent -package manifest YAML file containing channel-to-version mappings, much like a bundle's metadata. - -By default `generate packagemanifests` will generate a CSV, a package manifest file, and copy CRDs in the -[package manifests][package-manifests] format: - -```console -$ operator-sdk generate packagemanifests --version 0.0.1 -$ tree ./deploy/olm-catalog/test-operator -./deploy/olm-catalog/test-operator -├── 0.0.1 -│   ├── cache.my.domain_memcacheds.yaml -│   └── memcached-operator.clusterserviceversion.yaml -└── memcached-operator.package.yaml -``` - -## Update your Operator - -Let's say you added a new API `App` with group `app.example.com` and version `v1alpha1` to your Operator project, -and added a port to your manager Deployment in `deploy/operator.yaml`. - -If using a bundle format, the current version of your CSV can be updated by running: - -```console -$ operator-sdk generate bundle -``` - -If using a package manifests format, run: - -```console -$ operator-sdk generate packagemanifests --version 0.0.1 -``` - -Running the command for either format will append your new CRD to `spec.customresourcedefinitions.owned`, -replace the old data at `spec.install.spec.deployments` with your updated Deployment, -and update your existing CSV manifest. The SDK will not overwrite [user-defined](#csv-fields) -fields like `spec.maintainers`. - -## Upgrade your Operator - -Let's say you're upgrading your Operator to version `v0.0.2`. You also want to add a new channel `beta`, -and use it as the default channel.
- -If using a bundle format, a new version of your CSV can be created by running: - -```console -$ operator-sdk generate bundle --version 0.0.2 --channels=beta --default-channel=beta -``` - -If using a package manifests format, run: - -```console -$ operator-sdk generate packagemanifests --from-version 0.0.1 --version 0.0.2 --channel=beta --default-channel -``` - -Running the command for either format will persist user-defined fields, updates `spec.version`, -and populates `spec.replaces` with the old CSV version's name. - -## CSV fields - -Below are two lists of fields: the first is a list of all fields the SDK and OLM expect in a CSV, and the second are optional. - -**For Go Operators only:** Several fields require user input (labeled _user_) or a [CSV marker][csv-markers] -(labeled _marker_). This list may change as the SDK becomes better at generating CSV's. -These markers are not available to Ansible or Helm project types. - -Required: -- `metadata.name`: a *unique* name for this CSV of the format `.vX.Y.Z`, ex. `app-operator.v0.0.1`. -- `spec.version`: semantic version of the Operator, ex. `0.0.1`. -- `spec.installModes`: what mode of [installation namespacing][install-modes] OLM should use. -Currently all but `MultiNamespace` are supported by SDK Operators. -- `spec.customresourcedefinitions`: any CRDs the Operator uses. Certain fields in elements of `owned` will be filled by the SDK. - - `owned`: all CRDs the Operator deploys itself from it's bundle. - - `name`: CRD's `metadata.name`. - - `kind`: CRD's `metadata.spec.names.kind`. - - `version`: CRD's `metadata.spec.version`. - - `description` _(marker)_ : description of the CRD. - - `displayName` _(marker)_ : display name of the CRD. - - `resources` _(marker)_ : any Kubernetes resources used by the CRD, ex. `Pod`'s and `ConfigMap`'s. - - `specDescriptors` _(marker)_ : UI hints for inputs and outputs of the Operator's spec. 
- - `statusDescriptors` _(marker)_ : UI hints for inputs and outputs of the Operator's status. - - `actionDescriptors` _(user)_ : UI hints for an Operator's in-cluster actions. - - `required` _(user)_ : all CRDs the Operator expects to be present in-cluster, if any. - All `required` element fields must be populated manually. - -Optional: -- `spec.description` _(user)_ : a thorough description of the Operator's functionality. -- `spec.displayName` _(user)_ : a name to display for the Operator in Operator Hub. -- `spec.keywords` _(user)_ : a list of keywords describing the Operator. -- `spec.maintainers` _(user)_ : a list of human or organizational entities maintaining the Operator, with a `name` and `email`. -- `spec.provider` _(user)_ : the Operator provider, with a `name`; usually an organization. -- `spec.labels` _(user)_ : a list of `key:value` pairs to be used by Operator internals. -- `metadata.annotations.alm-examples`: CR examples, in JSON string literal format, for your CRD's. Ideally one per CRD. -- `metadata.annotations.capabilities`: level of Operator capability. See the [Operator maturity model][olm-capabilities] -for a list of valid values. -- `spec.replaces`: the name of the CSV being replaced by this CSV. -- `spec.links` _(user)_ : a list of URL's to websites, documentation, etc. pertaining to the Operator or application -being managed, each with a `name` and `url`. -- `spec.selector` _(user)_ : selectors by which the Operator can pair resources in a cluster. -- `spec.icon` _(user)_ : a base64-encoded icon unique to the Operator, set in a `base64data` field with a `mediatype`. -- `spec.maturity`: the Operator's maturity, ex. `alpha`. 
- - -[olm]:https://github.com/operator-framework/operator-lifecycle-manager -[doc-csv]:https://github.com/operator-framework/operator-lifecycle-manager/blob/0.15.1/doc/design/building-your-csv.md -[cli-overview]:/docs/olm-integration/legacy/cli-overview -[cli-gen-kustomize-manifests]:/docs/cli/operator-sdk_generate_kustomize_manifests -[cli-gen-bundle]:/docs/cli/operator-sdk_generate_bundle -[cli-gen-packagemanifests]:/docs/cli/operator-sdk_generate_packagemanifests -[bundle]:https://github.com/operator-framework/operator-registry/blob/v1.12.6/docs/design/operator-bundle.md -[bundle-metadata]:https://github.com/operator-framework/operator-registry/blob/v1.12.6/docs/design/operator-bundle.md#bundle-annotations -[package-manifests]:https://github.com/operator-framework/operator-registry/tree/v1.5.3#manifest-format -[install-modes]:https://github.com/operator-framework/operator-lifecycle-manager/blob/4197455/Documentation/design/building-your-csv.md#operator-metadata -[olm-capabilities]:/docs/operator-capabilities/ -[csv-markers]:/docs/golang/legacy/references/markers -[operatorhub]:https://operatorhub.io/ -[operator-registry]:https://github.com/operator-framework/operator-registry/#building-an-index-of-operators-using-opm diff --git a/website/content/en/docs/olm-integration/legacy/quickstart-bundle.md b/website/content/en/docs/olm-integration/legacy/quickstart-bundle.md deleted file mode 100644 index ca66932598..0000000000 --- a/website/content/en/docs/olm-integration/legacy/quickstart-bundle.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: OLM Integration Bundle Quickstart -linkTitle: Bundle Quickstart -weight: 1 ---- - -The [Operator Lifecycle Manager (OLM)][olm] is a set of cluster resources that manage the lifecycle of an Operator. -The Operator SDK supports both creating manifests for OLM deployment, and testing your Operator on an OLM-enabled -Kubernetes cluster. 
- -This document succinctly walks through getting an Operator OLM-ready with [bundles][bundle], and glosses over -explanations of certain steps for brevity. The following documents contain more detail on these steps: -- All operator-framework manifest commands supported by the SDK: [CLI overview][doc-cli-overview]. -- Generating operator-framework manifests: [generation overview][doc-olm-generate]. - -If you are working with package manifests, see the [package manifests quickstart][quickstart-package-manifests] -once you have completed the *Setup* section below. - -## Setup - -Let's first walk through creating an Operator for `memcached`, a distributed key-value store. - -Follow one of the user guides to develop the memcached-operator in either [Go][sdk-user-guide-go], -[Ansible][sdk-user-guide-ansible], or [Helm][sdk-user-guide-helm], depending on which Operator type you are interested in. -This guide assumes memcached-operator is on version `0.0.1`. - -### Enabling OLM - -Ensure OLM is enabled on your cluster before following this guide. [`operator-sdk olm`][cli-olm] -has several subcommands that can install, uninstall, and check the status of particular OLM versions in a cluster. - -**Note:** Certain cluster types may already have OLM enabled, but under a non-default (`"olm"`) namespace, -which can be configured by setting `--olm-namespace=[non-default-olm-namespace]` for `operator-sdk olm` subcommands -and `operator-sdk run packagemanifests`.
- -You can check if OLM is already installed by running the following command, -which will detect the installed OLM version automatically (0.15.1 in this example): - -```console -$ operator-sdk olm status -INFO[0000] Fetching CRDs for version "0.15.1" -INFO[0002] Fetching resources for version "0.15.1" -INFO[0002] Successfully got OLM status for version "0.15.1" - -NAME NAMESPACE KIND STATUS -olm Namespace Installed -operatorgroups.operators.coreos.com CustomResourceDefinition Installed -catalogsources.operators.coreos.com CustomResourceDefinition Installed -subscriptions.operators.coreos.com CustomResourceDefinition Installed -... -``` - -All resources listed should have status `Installed`. - -If OLM is not already installed, go ahead and install the latest version: - -```console -$ operator-sdk olm install -INFO[0000] Fetching CRDs for version "latest" -INFO[0001] Fetching resources for version "latest" -INFO[0007] Creating CRDs and resources -INFO[0007] Creating CustomResourceDefinition "clusterserviceversions.operators.coreos.com" -INFO[0007] Creating CustomResourceDefinition "installplans.operators.coreos.com" -INFO[0007] Creating CustomResourceDefinition "subscriptions.operators.coreos.com" -... -NAME NAMESPACE KIND STATUS -clusterserviceversions.operators.coreos.com CustomResourceDefinition Installed -installplans.operators.coreos.com CustomResourceDefinition Installed -subscriptions.operators.coreos.com CustomResourceDefinition Installed -catalogsources.operators.coreos.com CustomResourceDefinition Installed -... -``` - -**Note:** By default, `olm status` and `olm uninstall` auto-detect the OLM version installed in your cluster. -This can fail if the installation is broken in some way, so the version of OLM can be overridden using the -`--version` flag provided with these commands. 
- -## Creating a bundle - -_If working with package manifests, see the [package manifests quickstart][quickstart-package-manifests]._ - -We will now create bundle manifests and metadata by running `generate bundle` in the root of the memcached-operator project. - -```sh -$ operator-sdk generate bundle --version 0.0.1 -``` - -A bundle manifests directory `deploy/olm-catalog/memcached-operator/manifests` containing a CSV and all CRDs -in `deploy/crds`, a bundle [metadata][bundle-metadata] directory `deploy/olm-catalog/memcached-operator/metadata`, -and a [Dockerfile][bundle-dockerfile] `bundle.Dockerfile` have been created in the Operator project. - -These files can be statically validated by `bundle validate` to ensure the on-disk bundle representation is correct: - -```console -$ operator-sdk bundle validate ./deploy/olm-catalog/memcached-operator -INFO[0000] Found annotations file bundle-dir=deploy/olm-catalog/memcached-operator container-tool=docker -INFO[0000] Could not find optional dependencies file bundle-dir=deploy/olm-catalog/memcached-operator container-tool=docker -INFO[0000] All validation tests have completed successfully -``` - -## Deploying an Operator with OLM - -At this point in development we've generated all files necessary to build the memcached-operator bundle. -Now we're ready to test and deploy the Operator with OLM. - -### Deploying bundles in production - -OLM and Operator Registry consumes Operator bundles via an [index image][index-image], -which are composed of one or more bundles. To build a memcached-operator bundle, run: - -```console -$ docker build -f bundle.Dockerfile -t quay.io//memcached-operator:v0.1.0 . 
-``` - -Although we've validated on-disk manifests and metadata, we also must make sure the bundle itself is valid: - -```console -$ docker push quay.io//memcached-operator:v0.1.0 -$ operator-sdk bundle validate quay.io//memcached-operator:v0.1.0 -INFO[0000] Unpacked image layers bundle-dir=/tmp/bundle-716785960 container-tool=docker -INFO[0000] running docker pull bundle-dir=/tmp/bundle-716785960 container-tool=docker -INFO[0002] running docker save bundle-dir=/tmp/bundle-716785960 container-tool=docker -INFO[0002] All validation tests have completed successfully bundle-dir=/tmp/bundle-716785960 container-tool=docker -``` - -The SDK does not build index images; instead, use the Operator package manager tool [`opm`][opm] to -[build][doc-index-build] one. Once one has been built, follow the index image [usage docs][doc-olm-index] -to add an index to a cluster catalog, and the catalog [discovery docs][doc-olm-discovery] to tell OLM -about your cataloged Operator. - - -[sdk-user-guide-go]:/docs/golang/legacy/quickstart -[sdk-user-guide-ansible]:/docs/ansible/quickstart -[sdk-user-guide-helm]:/docs/helm/quickstart -[quickstart-package-manifests]:/docs/olm-integration/legacy/quickstart-package-manifests -[olm]:https://github.com/operator-framework/operator-lifecycle-manager/ -[bundle]:https://github.com/operator-framework/operator-registry/blob/v1.12.6/docs/design/operator-bundle.md -[bundle-metadata]:https://github.com/operator-framework/operator-registry/blob/v1.12.6/docs/design/operator-bundle.md#bundle-annotations -[bundle-dockerfile]:https://github.com/operator-framework/operator-registry/blob/v1.12.6/docs/design/operator-bundle.md#bundle-dockerfile -[cli-olm]:/docs/cli/operator-sdk_olm -[doc-cli-overview]:/docs/olm-integration/legacy/cli-overview -[doc-olm-generate]:/docs/olm-integration/legacy/generating-a-csv -[opm]:https://github.com/operator-framework/operator-registry/blob/master/docs/design/opm-tooling.md 
-[index-image]:https://github.com/operator-framework/operator-registry/blob/master/docs/design/opm-tooling.md#index -[doc-index-build]:https://github.com/operator-framework/operator-registry#building-an-index-of-operators-using-opm -[doc-olm-index]:https://github.com/operator-framework/operator-registry#using-the-index-with-operator-lifecycle-manager -[doc-olm-discovery]:https://github.com/operator-framework/operator-lifecycle-manager/#discovery-catalogs-and-automated-upgrades diff --git a/website/content/en/docs/olm-integration/legacy/quickstart-package-manifests.md b/website/content/en/docs/olm-integration/legacy/quickstart-package-manifests.md deleted file mode 100644 index 985c0b5d13..0000000000 --- a/website/content/en/docs/olm-integration/legacy/quickstart-package-manifests.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -title: OLM Integration Package Manifests Quickstart -linkTitle: Package Manifests Quickstart -weight: 2 ---- - -This guide assumes you have followed the introduction and *Setup* section of the [bundle quickstart][quickstart-bundle]. - -## Creating package manifests - -We will now create a package manifests format by running `generate packagemanifests` in the root of the memcached-operator project: - -```sh -$ operator-sdk generate packagemanifests --version 0.0.1 -``` - -A versioned manifests directory `deploy/olm-catalog/memcached-operator/0.0.1` containing a CSV and all CRDs -in `deploy/crds` and a package manifest YAML file `deploy/olm-catalog/memcached-operator/memcached-operator.package.yaml` -have been created in the Operator project. - -## Deploying an Operator with OLM - -At this point in development we've generated all files necessary to build a memcached-operator registry. -Now we're ready to test the Operator with OLM. 
- -### Testing package manifests - -[`operator-sdk run packagemanifests`][cli-run-packagemanifests] will create an Operator [registry][operator-registry] -from manifests and metadata in the memcached-operator project, and inform OLM that memcached-operator v0.0.1 -is ready to be deployed. This process effectively replicates production deployment in a constrained manner -to make sure OLM can deploy our Operator successfully before attempting real production deployment. - -`run packagemanifests` performs some optionally configurable setup [under the hood][doc-testing-deployment], but for -most use cases the following invocation is all we need: - -```console -$ operator-sdk run packagemanifests --operator-version 0.0.1 -INFO[0000] Running operator from directory deploy/olm-catalog/memcached-operator -INFO[0000] Creating memcached-operator registry -INFO[0000] Creating ConfigMap "olm/memcached-operator-registry-manifests-package" -INFO[0000] Creating ConfigMap "olm/memcached-operator-registry-manifests-0-0-1" -INFO[0000] Creating Deployment "olm/memcached-operator-registry-server" -INFO[0000] Creating Service "olm/memcached-operator-registry-server" -INFO[0000] Waiting for Deployment "olm/memcached-operator-registry-server" rollout to complete -INFO[0000] Waiting for Deployment "olm/memcached-operator-registry-server" to rollout: 0 of 1 updated replicas are available -INFO[0066] Deployment "olm/memcached-operator-registry-server" successfully rolled out -INFO[0066] Creating resources -INFO[0066] Creating CatalogSource "default/memcached-operator-ocs" -INFO[0066] Creating Subscription "default/memcached-operator-v0-0-1-sub" -INFO[0066] Creating OperatorGroup "default/operator-sdk-og" -INFO[0066] Waiting for ClusterServiceVersion "default/memcached-operator.v0.0.1" to reach 'Succeeded' phase -INFO[0066] Waiting for ClusterServiceVersion "default/memcached-operator.v0.0.1" to appear -INFO[0073] Found ClusterServiceVersion "default/memcached-operator.v0.0.1" phase: Pending 
-INFO[0077] Found ClusterServiceVersion "default/memcached-operator.v0.0.1" phase: InstallReady -INFO[0078] Found ClusterServiceVersion "default/memcached-operator.v0.0.1" phase: Installing -INFO[0036] Found ClusterServiceVersion "default/memcached-operator.v0.0.1" phase: Succeeded -INFO[0037] Successfully installed "memcached-operator.v0.0.1" on OLM version "0.15.1" - -NAME NAMESPACE KIND STATUS -memcacheds.cache.example.com default CustomResourceDefinition Installed -memcached-operator.v0.0.1 default ClusterServiceVersion Installed -``` - -As long as both the `ClusterServiceVersion` and all `CustomResourceDefinition`'s return an `Installed` status, -the memcached-operator has been deployed successfully. - -Now that we're done testing the memcached-operator, we should probably clean up the Operator's resources. -[`operator-sdk cleanup packagemanifests`][cli-cleanup-packagemanifests] will do this for you: - -```console -$ operator-sdk cleanup packagemanifests --operator-version 0.0.1 -INFO[0000] Deleting resources -INFO[0000] Deleting CatalogSource "default/memcached-operator-ocs" -INFO[0000] Deleting Subscription "default/memcached-operator-v0-0-1-sub" -INFO[0000] Deleting OperatorGroup "default/operator-sdk-og" -INFO[0000] Deleting CustomResourceDefinition "default/memcacheds.example.com" -INFO[0000] Deleting ClusterServiceVersion "default/memcached-operator.v0.0.1" -INFO[0000] Waiting for deleted resources to disappear -INFO[0001] Successfully uninstalled "memcached-operator.v0.0.1" on OLM version "0.15.1" -``` - - -[quickstart-bundle]:/docs/olm-integration/legacy/quickstart-bundle -[operator-registry]:https://github.com/operator-framework/operator-registry -[cli-run-packagemanifests]:/docs/cli/operator-sdk_run_packagemanifests -[cli-cleanup-packagemanifests]:/docs/cli/operator-sdk_cleanup_packagemanifests -[doc-testing-deployment]:/docs/olm-integration/legacy/testing-deployment diff --git a/website/content/en/docs/olm-integration/legacy/testing-deployment.md 
b/website/content/en/docs/olm-integration/legacy/testing-deployment.md deleted file mode 100644 index dec9e3a852..0000000000 --- a/website/content/en/docs/olm-integration/legacy/testing-deployment.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: Testing Operator Deployment with OLM -linkTitle: Testing Deployment -weight: 30 ---- - -This document discusses the behavior of `operator-sdk ` subcommands related to OLM deployment, -and assumes you are familiar with [OLM][olm], related terminology, -and have read the SDK-OLM integration [design proposal][sdk-olm-design]. - -Only the package manifests format is supported by `` subcommands. - -**Note:** before continuing, please read the [caveats](#caveats) section below. - -## `operator-sdk packagemanifests` command overview - -`operator-sdk packagemanifests` assumes OLM is already installed and running on your cluster, -and that your Operator has a valid [package manifests format][package-manifests]. -See the [CLI overview][doc-cli-overview] for commands to work with an OLM installation and generate a package manifests format. - -Let's look at the anatomy of the `run packagemanifests` (which is the same for `cleanup`) configuration model: - -- **kubeconfig-path**: the local path to a kubeconfig. - - This uses well-defined default loading rules to load the config if empty. -- **olm-namespace**: the namespace in which OLM is installed. -- **operator-namespace**: the cluster namespace in which Operator resources are created. - - This namespace must already exist in the cluster or be defined in a manifest passed to **include-paths**. -- **manifests-dir**: a directory containing the Operator's package manifests. -- **operator-version**: the version of the Operator to deploy. It must be a semantic version, ex. 0.0.1. - - This version must match the version of the CSV manifest found in **manifests-dir**, - ex. `deploy/olm-catalog//0.0.1` in an Operator SDK project. 
-- **install-mode**: specifies which supported [`installMode`][csv-install-modes] should be used to - create an `OperatorGroup` by configuring its `spec.targetNamespaces` field. - - The `InstallModeType` string passed must be marked as "supported" in the CSV being installed. - The namespaces passed must exist or be created by passing a `Namespace` manifest to IncludePaths. - - This option understands the following strings (assuming your CSV does as well): - - `OwnNamespace`: the Operator will watch its own namespace (from **operator-namespace** or the kubeconfig default). - This is the default. - - `SingleNamespace="my-ns"`: the Operator will watch a namespace, not necessarily its own. - - `AllNamespaces=""`: the Operator will watch all namespaces (cluster-scoped Operators). -- **include-paths**: a list of paths to manifests of Kubernetes resources that either - supplement or supplant defaults generated by `run`, ex. RBAC kinds. - - This option can be used if you have an existing set of manifests outside your versioned package - (ex. catalog manifests like a `Subscription`, `CatalogSource`, and/or `OperatorGroup`) - you wish to use instead of the corresponding defaults. - - Paths supplied to this command will be created with the same behavior of `kubectl create -f `. - - Kinds that are overridden if supplied: `CatalogSource`, `Subscription`, `OperatorGroup`. - - If a `Subscription` or `CatalogSource` are supplied, the other must be supplied - since they are linked by field references. -- **timeout**: a time string dictating the maximum time that `run` can run. The command will - return an error if the timeout is exceeded. - -### Caveats - -- ` packagemanifests` are intended to be used for testing purposes only, -since this command creates a transient image registry that should not be used in production. 
-Typically a registry is deployed separately and a set of catalog manifests are created in the cluster -to inform OLM of that registry and which Operator versions it can deploy and where to deploy the Operator. -- `run packagemanifests` can only deploy one Operator and one version of that Operator at a time, -hence its intended purpose being testing only. - - -[olm]:https://github.com/operator-framework/operator-lifecycle-manager/ -[sdk-olm-design]:https://github.com/operator-framework/operator-sdk/blob/master/proposals/sdk-integration-with-olm.md -[doc-cli-overview]:/docs/olm-integration/legacy/cli-overview -[package-manifests]:https://github.com/operator-framework/operator-registry/tree/v1.5.3#manifest-format -[csv-install-modes]:https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md#operator-metadata -[cli-olm-install]:/docs/cli/operator-sdk_olm_install -[cli-olm-status]:/docs/cli/operator-sdk_olm_status diff --git a/website/content/en/docs/olm-integration/quickstart-bundle.md b/website/content/en/docs/olm-integration/quickstart-bundle.md index 3cd279af66..11ae3fb06c 100644 --- a/website/content/en/docs/olm-integration/quickstart-bundle.md +++ b/website/content/en/docs/olm-integration/quickstart-bundle.md @@ -139,7 +139,7 @@ to add an index to a cluster catalog, and the catalog [discovery docs][doc-olm-d about your cataloged Operator. 
-[sdk-user-guide-go]:/docs/golang/legacy/quickstart +[sdk-user-guide-go]:/docs/golang/quickstart [sdk-user-guide-ansible]:/docs/ansible/quickstart [sdk-user-guide-helm]:/docs/helm/quickstart [quickstart-package-manifests]:/docs/olm-integration/quickstart-package-manifests diff --git a/website/content/en/docs/scorecard/scorecard.md b/website/content/en/docs/scorecard/scorecard.md deleted file mode 100644 index 52ec98bc97..0000000000 --- a/website/content/en/docs/scorecard/scorecard.md +++ /dev/null @@ -1,676 +0,0 @@ ---- -title: Scorecard - current -weight: 25 ---- - -# operator-sdk scorecard - -## Overview - -The scorecard works by creating all resources required by CRs and the operator. - -The scorecard will create another container in the operator deployment which is used to record calls to the API server and run a lot of the tests. The tests performed will also examine some of the fields in the CRs. - -The scorecard also supports plugins which allows to extend the functionality of the scorecard and add additional tests on it. - -## Requirements - -Following are some requirements for the operator project which would be checked by the scorecard. - -- Access to a Kubernetes v1.11.3+ cluster - -**For non-SDK operators:** - -- Resource manifests for installing/configuring the operator and custom resources. (see the [Writing E2E Tests][writing-tests] doc for more information on the global and namespaced manifests) -- Config getter that supports reading from the `KUBECONFIG` environment variable (such as the `clientcmd` or `controller-runtime` config getters). This is required for the scorecard proxy to work correctly. - -**NOTE:** If you would like to use it to check the integration of your operator project with [OLM][olm] then also the [Cluster Service Version (CSV)][olm-csv] file will be required. This is a requirement when the `olm-deployed` option is used - -## Running the Scorecard - -1. Setup the `.osdk-scorecard.yaml` configuration file in your project. 
See [Config file](#config-file) -2. Create the namespace defined in the RBAC files(`role_binding`) -3. Then, run the [`scorecard` command][cli-scorecard]. See the [Command args](#command-args) to check its options. - -**NOTE:** If your operator is non-SDK then some steps will be required in order to meet its requirements. - -## Configuration - -The scorecard is configured by a config file that allows configuring internal plugins as well as a few global configuration options. - -### Config File - -To use scorecard, you need to create a config file which by default will be `/.osdk-scorecard.yaml`.The following is an example of how the config file may look: - -```yaml -scorecard: - # Setting a global scorecard option - output: json - plugins: - # `basic` tests configured to test 2 CRs - - basic: - cr-manifest: - - "deploy/crds/cache.example.com_v1alpha1_memcached_cr.yaml" - - "deploy/crds/cache.example.com_v1alpha1_memcachedrs_cr.yaml" - # `olm` tests configured to test 2 CRs - - olm: - cr-manifest: - - "deploy/crds/cache.example.com_v1alpha1_memcached_cr.yaml" - - "deploy/crds/cache.example.com_v1alpha1_memcachedrs_cr.yaml" - csv-path: "deploy/olm-catalog/memcached-operator/0.0.3/memcached-operator.v0.0.3.clusterserviceversion.yaml" -``` - -The hierarchy of config methods for the global options that are also configurable via a flag from highest priority to least is: flag->file->default. - -The config file support is provided by the `viper` package. For more info on how viper configuration works, see [`viper`'s README][viper]. - -**NOTE:** The config file can be in any of the `json`, `yaml`, or `toml` formats as long as the file has the correct extension. As the config file may be extended to allow configuration -of all `operator-sdk` subcommands in the future, the scorecard's configuration must be under a `scorecard` subsection. - -### Command Args - -While most configuration is done via a config file, there are a few important args that can be used as follows. 
- -| Flag | Type | Description | -| -------- | -------- | -------- | -| `--bundle`, `-b` | string | The path to a bundle directory used for the bundle validation test. | -| `--config` | string | Path to config file (default `/.osdk-scorecard.yaml`; file type and extension must be `.yaml`). If a config file is not provided and a config file is not found at the default location, the scorecard will exit with an error. | -| `--output`, `-o` | string | Output format. Valid options are: `text` and `json`. The default format is `text`, which is designed to be a simpler human readable format. The `json` format uses the JSON schema output format used for plugins defined later in this document. | -| `--kubeconfig` | string | Path to kubeconfig. It sets the kubeconfig internally for internal plugins. | -| `--version` | string | The version of scorecard to run, v1alpha2 is the default, valid values are v1alpha2. | -| `--selector`, `-l` | string | The label selector to filter tests on. | -| `--list`, `-L` | bool | If true, only print the test names that would be run based on selector filtering. | - -### Config File Options - -| Option | Type | Description | -| -------- | -------- | -------- | -| `bundle` | string | equivalent of the `--bundle` flag. OLM bundle directory path, when specified runs bundle validation | -| `output` | string | equivalent of the `--output` flag. If this option is defined by both the config file and the flag, the flag's value takes priority | -| `kubeconfig` | string | equivalent of the `--kubeconfig` flag. If this option is defined by both the config file and the flag, the flag's value takes priority | -| `plugins` | array | this is an array of [Plugins](#plugins). | - -### Plugins - -A plugin object is used to configure plugins. The possible values for the plugin object are `basic`, or `olm`. - -Note that each Plugin type has different configuration options and they are named differently in the config. 
Only one of these fields can be set per plugin. - -#### Basic and OLM - -The `basic` and `olm` internal plugins have the same configuration fields: - -| Option | Type | Description | -| -------- | -------- | -------- | -| `cr-manifest` | [\]string | path(s) for CRs being tested. (required if `olm-deployed` is not set or false) | -| `csv-path` | string | path to CSV for the operator (required for OLM tests or if `olm-deployed` is set to true) | -| `olm-deployed` | bool | indicates that the CSV and relevant CRD's have been deployed onto the cluster by the [Operator Lifecycle Manager (OLM)][olm] | -| `kubeconfig` | string | path to kubeconfig. If both the global `kubeconfig` and this field are set, this field is used for the plugin | -| `namespace` | string | namespace to run the plugins in. If not set, the default specified by the kubeconfig is used | -| `init-timeout` | int | time in seconds until a timeout during initialization or cleanup of the operator | -| `crds-dir` | string | path to directory containing CRDs that must be deployed to the cluster | -| `namespaced-manifest` | string | manifest file with all resources that run within a namespace. By default, the scorecard will combine `service_account.yaml`, `role.yaml`, `role_binding.yaml`, and `operator.yaml` from the `deploy` directory into a temporary manifest to use as the namespaced manifest | -| `global-manifest` | string | manifest containing required resources that run globally (not namespaced). By default, the scorecard will combine all CRDs in the `crds-dir` directory into a temporary manifest to use as the global manifest | -| `proxy-port` | int | port for scorecard-proxy to listen to, default is port 8889 | - -## Tests Performed - -Following is the description of each internal [Plugin](#plugins). Note that there are 8 internal tests across 2 internal plugins that the scorecard can run. 
If multiple CRs are specified for a plugin, the test environment is fully cleaned up after each CR so each CR gets a clean testing environment. - -Each test has a `short name` that uniquely identifies the test. This is useful for selecting a specific test or tests to run as follows: -```sh -operator-sdk scorecard -o text --selector=test=checkspectest -operator-sdk scorecard -o text --selector='test in (checkspectest,checkstatustest)' -``` - -### Basic Operator - -| Test | Description | Short Name | -| -------- | -------- | -------- | -| Spec Block Exists | This test checks the Custom Resource(s) created in the cluster to make sure that all CRs have a spec block. This test has a maximum score of 1 | checkspectest | -| Status Block Exists | This test checks the Custom Resource(s) created in the cluster to make sure that all CRs have a status block. This test has a maximum score of 1 | checkstatustest | -| Writing Into CRs Has An Effect | This test reads the scorecard proxy's logs to verify that the operator is making `PUT` and/or `POST` requests to the API server, indicating that it is modifying resources. This test has a maximum score of 1 | writingintocrshaseffecttest | - -### OLM Integration - -| Test | Description | Short Name | -| -------- | -------- | -------- | -| OLM Bundle Validation | This test validates the OLM bundle manifests found in the bundle directory as specified by the bundle flag. If the bundle contents contain errors, then the test result output will include the validator log as well as error messages from the validation library. See this [document][olm-bundle] for details on OLM bundles. | bundlevalidationtest | -| Provided APIs have validation | This test verifies that the CRDs for the provided CRs contain a validation section and that there is validation for each spec and status field detected in the CR. This test has a maximum score equal to the number of CRs provided via the `cr-manifest` option. 
| crdshavevalidationtest | -| Owned CRDs Have Resources Listed | This test makes sure that the CRDs for each CR provided via the `cr-manifest` option have a `resources` subsection in the [`owned` CRDs section][owned-crds] of the CSV. If the test detects used resources that are not listed in the resources section, it will list them in the suggestions at the end of the test. This test has a maximum score equal to the number of CRs provided via the `cr-manifest` option. | crdshaveresourcestest | -| Spec Fields With Descriptors | This test verifies that every field in the Custom Resources' spec sections have a corresponding descriptor listed in the CSV. This test has a maximum score equal to the total number of fields in the spec sections of each custom resource passed in via the `cr-manifest` option. | specdescriptorstest | -| Status Fields With Descriptors | This test verifies that every field in the Custom Resources' status sections have a corresponding descriptor listed in the CSV. This test has a maximum score equal to the total number of fields in the status sections of each custom resource passed in via the `cr-manifest` option. | statusdescriptorstest | - -## Exit Status - -The scorecard return code is 1 if any of the tests executed did not pass and 0 if all selected tests pass. - -## Extending the Scorecard with Plugins - -To allow the scorecard to be further extended and capable of more complex testing as well as allow the community to make their own scorecard tests, a plugin system has been implemented -for the scorecard. To use it, a plugin developer simply needs to provide the binary or script, and the user can then configure the scorecard to use the new plugin. Since the scorecard -can run any executable as a plugin, the plugins can be written in any programming language supported by the OS the scorecard is being run on. All plugins are run from the root of the -operator project. 
- -To provide results to the scorecard, the plugin must output a valid JSON object to its `stdout`. Invalid JSON in `stdout` will result in the plugin being marked as errored. -To provide logs to the scorecard, plugins can either set the `log` field for the scorecard suites they return or they can output logs to `stderr`, which will stream the log -to the console if the scorecard is being run in with `output` unset or set to `text`, or be added to the main `ScorecardOutput.Log` field when `output` is set to `json` - -### JSON format - -The JSON output is formatted in the same way that a Kubernetes API would be, which allows for updates to the schema as well as the use of various Kubernetes helpers. The Golang structs are defined in `pkg/apis/scorecard/v1alpha2/types.go` and can be easily implemented by plugins written in Golang. Below is the JSON Schema: - -```json -{ - "$schema": "http://json-schema.org/draft-04/schema#", - "$ref": "#/definitions/ScorecardOutput", - "definitions": { - "FieldsV1": { - "additionalProperties": false, - "type": "object" - }, - "ManagedFieldsEntry": { - "properties": { - "apiVersion": { - "type": "string" - }, - "fieldsType": { - "type": "string" - }, - "fieldsV1": { - "$schema": "http://json-schema.org/draft-04/schema#", - "$ref": "#/definitions/FieldsV1" - }, - "manager": { - "type": "string" - }, - "operation": { - "type": "string" - }, - "time": { - "$ref": "#/definitions/Time" - } - }, - "additionalProperties": false, - "type": "object" - }, - "ObjectMeta": { - "properties": { - "annotations": { - "patternProperties": { - ".*": { - "type": "string" - } - }, - "type": "object" - }, - "clusterName": { - "type": "string" - }, - "creationTimestamp": { - "$schema": "http://json-schema.org/draft-04/schema#", - "$ref": "#/definitions/Time" - }, - "deletionGracePeriodSeconds": { - "type": "integer" - }, - "deletionTimestamp": { - "$ref": "#/definitions/Time" - }, - "finalizers": { - "items": { - "type": "string" - }, - "type": "array" - 
}, - "generateName": { - "type": "string" - }, - "generation": { - "type": "integer" - }, - "labels": { - "patternProperties": { - ".*": { - "type": "string" - } - }, - "type": "object" - }, - "managedFields": { - "items": { - "$schema": "http://json-schema.org/draft-04/schema#", - "$ref": "#/definitions/ManagedFieldsEntry" - }, - "type": "array" - }, - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - "ownerReferences": { - "items": { - "$schema": "http://json-schema.org/draft-04/schema#", - "$ref": "#/definitions/OwnerReference" - }, - "type": "array" - }, - "resourceVersion": { - "type": "string" - }, - "selfLink": { - "type": "string" - }, - "uid": { - "type": "string" - } - }, - "additionalProperties": false, - "type": "object" - }, - "OwnerReference": { - "required": [ - "apiVersion", - "kind", - "name", - "uid" - ], - "properties": { - "apiVersion": { - "type": "string" - }, - "blockOwnerDeletion": { - "type": "boolean" - }, - "controller": { - "type": "boolean" - }, - "kind": { - "type": "string" - }, - "name": { - "type": "string" - }, - "uid": { - "type": "string" - } - }, - "additionalProperties": false, - "type": "object" - }, - "ScorecardOutput": { - "required": [ - "TypeMeta", - "log", - "results" - ], - "properties": { - "TypeMeta": { - "$schema": "http://json-schema.org/draft-04/schema#", - "$ref": "#/definitions/TypeMeta" - }, - "log": { - "type": "string" - }, - "metadata": { - "$schema": "http://json-schema.org/draft-04/schema#", - "$ref": "#/definitions/ObjectMeta" - }, - "results": { - "items": { - "$schema": "http://json-schema.org/draft-04/schema#", - "$ref": "#/definitions/ScorecardTestResult" - }, - "type": "array" - } - }, - "additionalProperties": false, - "type": "object" - }, - "ScorecardTestResult": { - "required": [ - "name", - "description" - ], - "properties": { - "description": { - "type": "string" - }, - "errors": { - "items": { - "type": "string" - }, - "type": "array" - }, - "labels": { - 
"patternProperties": { - ".*": { - "type": "string" - } - }, - "type": "object" - }, - "log": { - "type": "string" - }, - "name": { - "type": "string" - }, - "state": { - "type": "string" - }, - "suggestions": { - "items": { - "type": "string" - }, - "type": "array" - } - }, - "additionalProperties": false, - "type": "object" - }, - "Time": { - "additionalProperties": false, - "type": "object" - }, - "TypeMeta": { - "properties": { - "apiVersion": { - "type": "string" - }, - "kind": { - "type": "string" - } - }, - "additionalProperties": false, - "type": "object" - } - } -} -``` - -**NOTE:** The `ScorecardOutput` object is designed the same as a Kubernetes API, and thus also has a full `TypeMeta` and `ObjectMeta`. This means that it contains -various other fields such as `selfLink`, `uid`, and others. At the moment, the only required fields and the only fields that will be checked by the scorecard -are the `kind` and `apiVersion` fields as listed in the above JSONSchema. - -Example of a valid JSON output: - -```json -{ - "kind": "ScorecardOutput", - "apiVersion": "osdk.openshift.io/v1alpha2", - "metadata": { - "creationTimestamp": null - }, - "log": "time=\"2020-01-16T15:30:41-06:00\" level=info msg=\"Using config file: /home/someuser/projects/memcached-operator/.osdk-scorecard.yaml\"\n", - "results": [ - { - "name": "Spec Block Exists", - "description": "Custom Resource has a Spec Block", - "labels": { - "necessity": "required", - "suite": "basic", - "test": "checkspectest" - }, - "state": "pass" - }, - { - "name": "Status Block Exists", - "description": "Custom Resource has a Status Block", - "labels": { - "necessity": "required", - "suite": "basic", - "test": "checkstatustest" - }, - "state": "pass" - }, - { - "name": "Writing into CRs has an effect", - "description": "A CR sends PUT/POST requests to the API server to modify resources in response to spec block changes", - "labels": { - "necessity": "required", - "suite": "basic", - "test": 
"writingintocrshaseffecttest" - }, - "state": "pass" - }, - { - "name": "Bundle Validation Test", - "description": "Validates bundle contents", - "labels": { - "necessity": "required", - "suite": "olm", - "test": "bundlevalidationtest" - }, - "state": "fail", - "errors": [ - "unable to find the OLM 'bundle' directory which is required for this test" - ] - }, - { - "name": "Provided APIs have validation", - "description": "All CRDs have an OpenAPI validation subsection", - "labels": { - "necessity": "required", - "suite": "olm", - "test": "crdshavevalidationtest" - }, - "state": "pass" - }, - { - "name": "Owned CRDs have resources listed", - "description": "All Owned CRDs contain a resources subsection", - "labels": { - "necessity": "required", - "suite": "olm", - "test": "crdshaveresourcestest" - }, - "state": "fail", - "suggestions": [ - "If it would be helpful to an end-user to understand or troubleshoot your CR, consider adding resources [memcacheds/v1alpha1 replicasets/v1 deployments/v1 services/v1 servicemonitors/v1 pods/v1 configmaps/v1] to the resources section for owned CRD Memcached" - ] - }, - { - "name": "Spec fields with descriptors", - "description": "All spec fields have matching descriptors in the CSV", - "labels": { - "necessity": "required", - "suite": "olm", - "test": "specdescriptorstest" - }, - "state": "fail", - "suggestions": [ - "Add a spec descriptor for size" - ] - }, - { - "name": "Status fields with descriptors", - "description": "All status fields have matching descriptors in the CSV", - "labels": { - "necessity": "required", - "suite": "olm", - "test": "statusdescriptorstest" - }, - "state": "fail", - "suggestions": [ - "Add a status descriptor for status" - ] - } - ] -} -``` - -**NOTE:** The `ScorecardOutput.Log` field is only intended to be used to log the scorecard's output and the scorecard will ignore that field if a plugin provides it. -To add logs to the main `ScorecardOuput.Log` field, a plugin can output the logs to `stderr`. 
- -## Running the scorecard with an OLM-managed operator - -The scorecard can be run using a [Cluster Service Version (CSV)][olm-csv], providing a way to test cluster-ready and non-SDK operators. - -Running with a CSV alone requires both the `csv-path: ` and `olm-deployed` options to be set. The scorecard assumes your CSV and relevant CRD's have been deployed onto the cluster using OLM when using `olm-deployed`. - -The scorecard requires a proxy container in the operator's `Deployment` pod to read operator logs. A few modifications to your CSV and creation of one extra object are required to run the proxy _before_ deploying your operator with OLM: - -1. Create a proxy server secret containing a local Kubeconfig: - 1. Generate a username using the scorecard proxy's namespaced owner reference. - ```sh - # Substitute "$your_namespace" for the namespace your operator will be deployed in (if any). - $ echo '{"apiVersion":"","kind":"","name":"scorecard","uid":"","Namespace":"'${your_namespace}'"}' | base64 -w 0 - eyJhcGlWZXJzaW9uIjoiIiwia2luZCI6IiIsIm5hbWUiOiJzY29yZWNhcmQiLCJ1aWQiOiIiLCJOYW1lc3BhY2UiOiJvbG0ifQo= - ``` - 1. Write a `Config` manifest `scorecard-config.yaml` using the following template, substituting `${your_username}` for the base64 username generated above: - ```yaml - apiVersion: v1 - kind: Config - clusters: - - cluster: - insecure-skip-tls-verify: true - server: http://${your_username}@localhost:8889 - name: proxy-server - contexts: - - context: - cluster: proxy-server - user: admin/proxy-server - name: $namespace/proxy-server - current-context: $namespace/proxy-server - preferences: {} - users: - - name: admin/proxy-server - user: - username: ${your_username} - password: unused - ``` - 1. 
Encode the `Config` as base64: - ```sh - $ cat scorecard-config.yaml | base64 -w 0 - YXBpVmVyc2lvbjogdjEKa2luZDogQ29uZmlnCmNsdXN0ZXJzOgotIGNsdXN0ZXI6CiAgICBpbnNlY3VyZS1za2lwLXRscy12ZXJpZnk6IHRydWUKICAgIHNlcnZlcjogaHR0cDovL2V5SmhjR2xXWlhKemFXOXVJam9pSWl3aWEybHVaQ0k2SWlJc0ltNWhiV1VpT2lKelkyOXlaV05oY21RaUxDSjFhV1FpT2lJaUxDSk9ZVzFsYzNCaFkyVWlPaUp2YkcwaWZRbz1AbG9jYWxob3N0Ojg4ODkKICBuYW1lOiBwcm94eS1zZXJ2ZXIKY29udGV4dHM6Ci0gY29udGV4dDoKICAgIGNsdXN0ZXI6IHByb3h5LXNlcnZlcgogICAgdXNlcjogYWRtaW4vcHJveHktc2VydmVyCiAgbmFtZTogL3Byb3h5LXNlcnZlcgpjdXJyZW50LWNvbnRleHQ6IC9wcm94eS1zZXJ2ZXIKcHJlZmVyZW5jZXM6IHt9CnVzZXJzOgotIG5hbWU6IGFkbWluL3Byb3h5LXNlcnZlcgogIHVzZXI6CiAgICB1c2VybmFtZTogZXlKaGNHbFdaWEp6YVc5dUlqb2lJaXdpYTJsdVpDSTZJaUlzSW01aGJXVWlPaUp6WTI5eVpXTmhjbVFpTENKMWFXUWlPaUlpTENKT1lXMWxjM0JoWTJVaU9pSnZiRzBpZlFvPQogICAgcGFzc3dvcmQ6IHVudXNlZAo= - ``` - 1. Create a `Secret` manifest `scorecard-secret.yaml` containing the operator's namespace (if any) the `Config`'s base64 encoding as a `spec.data` value under the key `kubeconfig`: - ```yaml - apiVersion: v1 - kind: Secret - metadata: - name: scorecard-kubeconfig - namespace: ${your_namespace} - data: - kubeconfig: ${kubeconfig_base64} - ``` - 1. Apply the secret in-cluster: - ```sh - $ kubectl apply -f scorecard-secret.yaml - ``` - 1. Insert a volume referring to the `Secret` into the operator's `Deployment`: - ```yaml - spec: - install: - spec: - deployments: - - name: memcached-operator - spec: - ... - template: - ... - spec: - containers: - ... - volumes: - # scorecard kubeconfig volume - - name: scorecard-kubeconfig - secret: - secretName: scorecard-kubeconfig - items: - - key: kubeconfig - path: config - ``` -1. Insert a volume mount and `KUBECONFIG` environment variable into each container in your operator's `Deployment`: - ```yaml - spec: - install: - spec: - deployments: - - name: memcached-operator - spec: - ... - template: - ... - spec: - containers: - - name: container1 - ... 
- volumeMounts: - # scorecard kubeconfig volume mount - - name: scorecard-kubeconfig - mountPath: /scorecard-secret - env: - # scorecard kubeconfig env - - name: KUBECONFIG - value: /scorecard-secret/config - - name: container2 - # Do the same for this and all other containers. - ... - ``` -1. Insert the scorecard proxy container into the operator's `Deployment`: - ```yaml - spec: - install: - spec: - deployments: - - name: memcached-operator - spec: - ... - template: - ... - spec: - containers: - ... - # scorecard proxy container - - name: scorecard-proxy - command: - - scorecard-proxy - env: - - name: WATCH_NAMESPACE - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.namespace - image: quay.io/operator-framework/scorecard-proxy:master - imagePullPolicy: Always - ports: - - name: proxy - containerPort: 8889 - ``` - -Alternatively, the [community-operators][community-operators] repo has several bash functions that can perform these operations for you: -```sh -$ curl -Lo csv-manifest-modifiers.sh https://raw.githubusercontent.com/operator-framework/community-operators/master/scripts/lib/file -$ . ./csv-manifest-modifiers.sh -# $NAMESPACE is the namespace your operator will deploy in -$ create_kubeconfig_secret_file scorecard-secret.yaml "$NAMESPACE" -$ kubectl apply -f scorecard-secret.yaml -# $CSV_FILE is the path to your operator's CSV manifest -$ insert_kubeconfig_volume "$CSV_FILE" -$ insert_kubeconfig_secret_mount "$CSV_FILE" -$ insert_proxy_container "$CSV_FILE" "quay.io/operator-framework/scorecard-proxy:master" -``` - -Once done, follow the steps in this [document][olm-deploy-operator] to bundle your CSV and CRD's, deploy OLM on minikube or [OKD][okd], and deploy your operator. Once these steps have been completed, run the scorecard with both the `csv-path: ` and `olm-deployed` options set. - -**NOTES:** - -- As of now, using the scorecard with a CSV does not permit multiple CR manifests to be set through the CLI/config/CSV annotations. 
You will have to tear down your operator in the cluster, re-deploy, and re-run the scorecard for each CR being tested. In the future the scorecard will fully support testing multiple CR's without requiring users to teardown/standup each time. -- You can either set `cr-manifest` or your CSV's [`metadata.annotations['alm-examples']`][olm-csv-alm-examples] to provide CR's to the scorecard, but not both. - -[cli-scorecard]: /docs/cli/operator-sdk_scorecard -[writing-tests]: ../../golang/legacy/e2e-tests -[owned-crds]: https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md#owned-crds -[alm-examples]: https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md#crd-templates -[viper]: https://github.com/spf13/viper/blob/master/README.md -[olm-bundle]:https://github.com/operator-framework/operator-registry#manifest-format -[olm-csv]:https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md -[olm-csv-alm-examples]:https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md#crd-templates -[olm]:https://github.com/operator-framework/operator-lifecycle-manager -[olm-deploy-operator]:https://github.com/operator-framework/community-operators/blob/master/docs/testing-operators.md -[okd]:https://www.okd.io/ -[community-operators]:https://github.com/operator-framework/community-operators