-
Notifications
You must be signed in to change notification settings - Fork 75
Cleanup monitoring resource files #1017
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
This file was deleted.
This file was deleted.
This file was deleted.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,9 +1,7 @@ | ||
| package common | ||
|
|
||
| import ( | ||
| "context" | ||
| "fmt" | ||
| "os" | ||
|
|
||
| mfclient "github.com/manifestival/controller-runtime-client" | ||
| mf "github.com/manifestival/manifestival" | ||
|
|
@@ -12,97 +10,76 @@ import ( | |
| v1 "k8s.io/api/core/v1" | ||
| metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
| "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" | ||
| "k8s.io/apimachinery/pkg/types" | ||
| "k8s.io/apimachinery/pkg/util/intstr" | ||
| "k8s.io/client-go/kubernetes/scheme" | ||
| "knative.dev/pkg/kmeta" | ||
| "sigs.k8s.io/controller-runtime/pkg/client" | ||
| ) | ||
|
|
||
| const ( | ||
| EventingSourceServiceMonitorPath = "deploy/resources/monitoring/source-service-monitor.yaml" | ||
| EventingSourcePath = "deploy/resources/monitoring/source-service.yaml" | ||
| SourceLabel = "eventing.knative.dev/source" | ||
| SourceNameLabel = "eventing.knative.dev/sourceName" | ||
| SourceRoleLabel = "sources.knative.dev/role" | ||
| TestMonitor = "TEST_MONITOR" | ||
| TestSourceServiceMonitorPath = "TEST_SOURCE_SERVICE_MONITOR_PATH" | ||
| TestSourceServicePath = "TEST_SOURCE_SERVICE_PATH" | ||
| SourceLabel = "eventing.knative.dev/source" | ||
| SourceNameLabel = "eventing.knative.dev/sourceName" | ||
| SourceRoleLabel = "sources.knative.dev/role" | ||
| ) | ||
|
|
||
| func SetupSourceServiceMonitor(client client.Client, instance *appsv1.Deployment) error { | ||
| func SetupSourceServiceMonitorResources(client client.Client, instance *appsv1.Deployment) error { | ||
| labels := instance.Spec.Selector.MatchLabels | ||
|
|
||
| clientOptions := mf.UseClient(mfclient.NewClient(client)) | ||
| // create service for the deployment | ||
| manifest, err := mf.NewManifest(getMonitorPath(TestSourceServicePath, EventingSourcePath), clientOptions) | ||
| // Create service monitor resources for source | ||
| smManifest, err := createServiceMonitorManifest(labels, instance.Name, instance.Namespace, clientOptions) | ||
| if err != nil { | ||
| return fmt.Errorf("unable to parse source service manifest: %w", err) | ||
| } | ||
| transforms := []mf.Transformer{updateService(labels, instance.Name), mf.InjectOwner(instance), mf.InjectNamespace(instance.Namespace)} | ||
| if manifest, err = manifest.Transform(transforms...); err != nil { | ||
| return fmt.Errorf("unable to transform source service manifest: %w", err) | ||
| } | ||
| if err := manifest.Apply(); err != nil { | ||
| return err | ||
| } | ||
|
|
||
| // get service back, needed for the UID and setting owner refs | ||
| srv := &v1.Service{} | ||
| if err := client.Get(context.TODO(), types.NamespacedName{Name: instance.Name, Namespace: instance.Namespace}, srv); err != nil { | ||
| return err | ||
| } | ||
| // create service monitor for source | ||
| manifest, err = mf.NewManifest(getMonitorPath(TestSourceServiceMonitorPath, EventingSourceServiceMonitorPath), clientOptions) | ||
| if err != nil { | ||
| return fmt.Errorf("unable to parse source service monitor manifest: %w", err) | ||
| } | ||
| transforms = []mf.Transformer{updateServiceMonitor(labels, instance.Name), mf.InjectOwner(srv), mf.InjectNamespace(instance.Namespace)} | ||
| if manifest, err = manifest.Transform(transforms...); err != nil { | ||
| if *smManifest, err = smManifest.Transform(mf.InjectOwner(instance)); err != nil { | ||
| return fmt.Errorf("unable to transform source service monitor manifest: %w", err) | ||
| } | ||
| return manifest.Apply() | ||
| return smManifest.Apply() | ||
| } | ||
|
|
||
| func getMonitorPath(envVar string, defaultVal string) string { | ||
| path := os.Getenv(envVar) | ||
| if path == "" { | ||
| return defaultVal | ||
| func createServiceMonitorManifest(labels map[string]string, depName string, ns string, options mf.Option) (*mf.Manifest, error) { | ||
| var svU = &unstructured.Unstructured{} | ||
| var smU = &unstructured.Unstructured{} | ||
| sms := v1.Service{ | ||
| ObjectMeta: metav1.ObjectMeta{ | ||
| Name: depName, | ||
| Namespace: ns, | ||
| Labels: kmeta.CopyMap(labels), | ||
| }, | ||
| Spec: v1.ServiceSpec{ | ||
| Ports: []v1.ServicePort{{ | ||
| Name: "http-metrics", | ||
| Port: 9090, | ||
| TargetPort: intstr.FromInt(9090), | ||
| Protocol: "TCP", | ||
| }}, | ||
| Selector: kmeta.CopyMap(labels), | ||
| }} | ||
| sms.Labels["name"] = sms.Name | ||
| if err := scheme.Scheme.Convert(&sms, svU, nil); err != nil { | ||
| return nil, err | ||
| } | ||
| return path | ||
| } | ||
|
|
||
| func updateService(labels map[string]string, depName string) mf.Transformer { | ||
| return func(resource *unstructured.Unstructured) error { | ||
| if resource.GetKind() != "Service" { | ||
| return nil | ||
| } | ||
| var svc = &v1.Service{} | ||
| if err := scheme.Scheme.Convert(resource, svc, nil); err != nil { | ||
| return err | ||
| } | ||
| svc.Name = depName | ||
| svc.Labels = kmeta.CopyMap(labels) | ||
| svc.Spec.Selector = kmeta.CopyMap(labels) | ||
| svc.Labels["name"] = svc.Name | ||
| return scheme.Scheme.Convert(svc, resource, nil) | ||
| sm := monitoringv1.ServiceMonitor{ | ||
| ObjectMeta: metav1.ObjectMeta{ | ||
| Name: depName, | ||
| Namespace: ns, | ||
| Labels: kmeta.CopyMap(labels), | ||
| }, | ||
| Spec: monitoringv1.ServiceMonitorSpec{ | ||
| Endpoints: []monitoringv1.Endpoint{{Port: "http-metrics"}}, | ||
| NamespaceSelector: monitoringv1.NamespaceSelector{ | ||
| MatchNames: []string{ns}, | ||
| }, | ||
| Selector: metav1.LabelSelector{ | ||
| MatchLabels: map[string]string{"name": depName}, | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It'd be cool to "declaratively" connect this with the service above, for example like so: selector := map[string]string{"name": depName}
sms := v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: depName,
Namespace: ns,
Labels: kmeta.UnionMaps(labels, selector),
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Name: "http-metrics",
Port: 9090,
TargetPort: intstr.FromInt(9090),
Protocol: "TCP",
}},
Selector: kmeta.CopyMap(labels),
}}
sm := monitoringv1.ServiceMonitor{
ObjectMeta: metav1.ObjectMeta{
Name: depName,
Namespace: ns,
Labels: kmeta.CopyMap(labels),
},
Spec: monitoringv1.ServiceMonitorSpec{
Endpoints: []monitoringv1.Endpoint{{Port: "http-metrics"}},
NamespaceSelector: monitoringv1.NamespaceSelector{
MatchNames: []string{ns},
},
Selector: metav1.LabelSelector{
MatchLabels: selector,
}
}} |
||
| }, | ||
| }} | ||
| sm.Labels["name"] = sm.Name | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Does the ServiceMonitor even need this label?
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Added for consistency with the service labels, as done in that operator framework. |
||
| if err := scheme.Scheme.Convert(&sm, smU, nil); err != nil { | ||
| return nil, err | ||
| } | ||
| } | ||
|
|
||
| func updateServiceMonitor(labels map[string]string, depName string) mf.Transformer { | ||
| return func(resource *unstructured.Unstructured) error { | ||
| if resource.GetKind() != "ServiceMonitor" { | ||
| return nil | ||
| } | ||
| var sm = &monitoringv1.ServiceMonitor{} | ||
| if err := scheme.Scheme.Convert(resource, sm, nil); err != nil { | ||
| return err | ||
| } | ||
| sm.Name = depName | ||
| sm.Labels = kmeta.CopyMap(labels) | ||
| sm.Spec.Selector = metav1.LabelSelector{ | ||
| MatchLabels: map[string]string{"name": sm.Name}, | ||
| } | ||
| sm.Labels["name"] = sm.Name | ||
| return scheme.Scheme.Convert(sm, resource, nil) | ||
| smManifest, err := mf.ManifestFrom(mf.Slice([]unstructured.Unstructured{*svU, *smU}), options) | ||
| if err != nil { | ||
| return nil, err | ||
| } | ||
| return &smManifest, nil | ||
| } | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Any particular reason we apply the same labels here that we match on? Same for the ServiceMonitor. Do we need labels at all (modulo the one to match on for the ServiceMonitor).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
These labels come from the deployment so I use them to tag the svc/sm too. It is a way to filter resources also from a cli perspective.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
They actually are the selector of the deployment though, right? So they'd be the same labels as on the pods.