-
Notifications
You must be signed in to change notification settings - Fork 630
Create loggers in controllers main function. #303
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
06438e2
9905fe6
d5f789e
6b093cf
1d4fbab
11fbe8a
bdd8375
46bf0ea
2743412
9d68d86
09db04a
5ecf818
2336606
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -25,10 +25,22 @@ import ( | |
| flowsv1alpha1 "github.com/knative/eventing/pkg/apis/flows/v1alpha1" | ||
| "github.com/knative/eventing/pkg/controller/feed" | ||
| "github.com/knative/eventing/pkg/controller/flow" | ||
| "github.com/knative/eventing/pkg/logconfig" | ||
|
|
||
| "github.com/knative/pkg/configmap" | ||
| "github.com/knative/pkg/logging" | ||
| "github.com/knative/pkg/logging/logkey" | ||
|
|
||
| istiov1alpha3 "github.com/knative/serving/pkg/apis/istio/v1alpha3" | ||
| servingv1alpha1 "github.com/knative/serving/pkg/apis/serving/v1alpha1" | ||
| "go.uber.org/zap" | ||
|
|
||
| "k8s.io/apimachinery/pkg/runtime" | ||
| "k8s.io/client-go/kubernetes" | ||
| _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" | ||
| "k8s.io/client-go/rest" | ||
|
|
||
| "github.com/knative/eventing/pkg/system" | ||
| "sigs.k8s.io/controller-runtime/pkg/client/config" | ||
| "sigs.k8s.io/controller-runtime/pkg/controller" | ||
| "sigs.k8s.io/controller-runtime/pkg/manager" | ||
|
|
@@ -44,8 +56,45 @@ type ProvideFunc func(manager.Manager) (controller.Controller, error) | |
|
|
||
| func main() { | ||
| flag.Parse() | ||
|
|
||
| // Read the logging config and setup a logger. | ||
| cm, err := configmap.Load("/etc/config-logging") | ||
| if err != nil { | ||
| log.Fatalf("Error loading logging configuration: %v", err) | ||
| } | ||
| cfg, err := logging.NewConfigFromMap(cm, logconfig.ControllerManager) | ||
| if err != nil { | ||
| log.Fatalf("Error parsing logging configuration: %v", err) | ||
| } | ||
| logger, atomicLevel := logging.NewLoggerFromConfig(cfg, logconfig.ControllerManager) | ||
| defer logger.Sync() | ||
| logger = logger.With(zap.String(logkey.ControllerType, logconfig.ControllerManager)) | ||
|
|
||
| logger.Info("Starting the Controller Manager") | ||
|
|
||
| // This tells controller-runtime to use zap to log internal messages. | ||
| logf.SetLogger(logf.ZapLogger(false)) | ||
|
|
||
| // set up signals so we handle the first shutdown signal gracefully | ||
| stopCh := signals.SetupSignalHandler() | ||
|
|
||
| clusterConfig, err := rest.InClusterConfig() | ||
| if err != nil { | ||
| logger.Fatal("Failed to get in cluster config", err) | ||
| } | ||
|
|
||
| kubeClient, err := kubernetes.NewForConfig(clusterConfig) | ||
| if err != nil { | ||
| logger.Fatal("Failed to get the client set", err) | ||
| } | ||
|
|
||
| // Watch the logging config map and dynamically update logging levels. | ||
| configMapWatcher := configmap.NewDefaultWatcher(kubeClient, system.Namespace) | ||
| configMapWatcher.Watch(logconfig.ConfigName, logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.ControllerManager, logconfig.ControllerManager)) | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It would be nice to be able to encapsulate the 35 new lines in pkg/logging, but I think that's a follow-on PR.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I agree. Had the same thought as I was adding this. |
||
| if err = configMapWatcher.Start(stopCh); err != nil { | ||
| logger.Fatalf("failed to start controller manager configmap watcher: %v", err) | ||
| } | ||
|
|
||
| // Setup a Manager | ||
| mrg, err := manager.New(config.GetConfigOrDie(), manager.Options{}) | ||
| if err != nil { | ||
|
|
@@ -72,11 +121,12 @@ func main() { | |
| flow.ProvideController, | ||
| } | ||
|
|
||
| // TODO(n3wscott): Send the logger to the controllers. | ||
| for _, provider := range providers { | ||
| if _, err := provider(mrg); err != nil { | ||
| log.Fatal(err) | ||
| } | ||
| } | ||
|
|
||
| log.Fatal(mrg.Start(signals.SetupSignalHandler())) | ||
| log.Fatal(mrg.Start(stopCh)) | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -22,7 +22,6 @@ import ( | |
| "net/http" | ||
| "time" | ||
|
|
||
| "github.com/golang/glog" | ||
| kubeinformers "k8s.io/client-go/informers" | ||
| "k8s.io/client-go/kubernetes" | ||
| "k8s.io/client-go/rest" | ||
|
|
@@ -41,7 +40,14 @@ import ( | |
| "github.com/knative/eventing/pkg/controller/clusterbus" | ||
| "github.com/knative/eventing/pkg/signals" | ||
|
|
||
| "github.com/knative/eventing/pkg/logconfig" | ||
| "github.com/knative/eventing/pkg/system" | ||
| "github.com/knative/pkg/configmap" | ||
| "github.com/knative/pkg/logging" | ||
| "github.com/knative/pkg/logging/logkey" | ||
| "github.com/prometheus/client_golang/prometheus/promhttp" | ||
| "go.uber.org/zap" | ||
| "log" | ||
| ) | ||
|
|
||
| const ( | ||
|
|
@@ -58,27 +64,42 @@ var ( | |
| func main() { | ||
| flag.Parse() | ||
|
|
||
| // Read the logging config and setup a logger. | ||
| cm, err := configmap.Load("/etc/config-logging") | ||
| if err != nil { | ||
| log.Fatalf("Error loading logging configuration: %v", err) | ||
| } | ||
| config, err := logging.NewConfigFromMap(cm, logconfig.Controller) | ||
| if err != nil { | ||
| log.Fatalf("Error parsing logging configuration: %v", err) | ||
| } | ||
| logger, atomicLevel := logging.NewLoggerFromConfig(config, logconfig.Controller) | ||
| defer logger.Sync() | ||
| logger = logger.With(zap.String(logkey.ControllerType, logconfig.Controller)) | ||
|
|
||
| logger.Info("Starting the Controller") | ||
|
|
||
| // set up signals so we handle the first shutdown signal gracefully | ||
| stopCh := signals.SetupSignalHandler() | ||
|
|
||
| cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) | ||
| if err != nil { | ||
| glog.Fatalf("Error building kubeconfig: %s", err.Error()) | ||
| logger.Fatalf("Error building kubeconfig: %s", err.Error()) | ||
| } | ||
|
|
||
| kubeClient, err := kubernetes.NewForConfig(cfg) | ||
| if err != nil { | ||
| glog.Fatalf("Error building kubernetes clientset: %s", err.Error()) | ||
| logger.Fatalf("Error building kubernetes clientset: %s", err.Error()) | ||
| } | ||
|
|
||
| client, err := clientset.NewForConfig(cfg) | ||
| if err != nil { | ||
| glog.Fatalf("Error building clientset: %s", err.Error()) | ||
| logger.Fatalf("Error building clientset: %s", err.Error()) | ||
| } | ||
|
|
||
| servingClient, err := servingclientset.NewForConfig(cfg) | ||
| if err != nil { | ||
| glog.Fatalf("Error building serving clientset: %s", err.Error()) | ||
| logger.Fatalf("Error building serving clientset: %s", err.Error()) | ||
| } | ||
|
|
||
| // TODO: Rip this out from all the controllers since we can get it | ||
|
|
@@ -87,20 +108,28 @@ func main() { | |
| // Kubernetes. Clients will use the Pod's ServiceAccount principal. | ||
| restConfig, err := rest.InClusterConfig() | ||
| if err != nil { | ||
| glog.Fatalf("Error building rest config: %v", err.Error()) | ||
| logger.Fatalf("Error building rest config: %v", err.Error()) | ||
| } | ||
|
|
||
| kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) | ||
| informerFactory := informers.NewSharedInformerFactory(client, time.Second*30) | ||
| servingInformerFactory := servinginformers.NewSharedInformerFactory(servingClient, time.Second*30) | ||
|
|
||
| // Watch the logging config map and dynamically update logging levels. | ||
| configMapWatcher := configmap.NewDefaultWatcher(kubeClient, system.Namespace) | ||
| configMapWatcher.Watch(logconfig.ConfigName, logging.UpdateLevelFromConfigMap(logger, atomicLevel, logconfig.Controller, logconfig.Controller)) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. That is very cool and will be extremely useful :)
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I'm glad you like it :) |
||
| if err = configMapWatcher.Start(stopCh); err != nil { | ||
| logger.Fatalf("failed to start controller config map watcher: %v", err) | ||
| } | ||
|
|
||
| // Add new controllers here. | ||
| ctors := []controller.Constructor{ | ||
| bus.NewController, | ||
| clusterbus.NewController, | ||
| channel.NewController, | ||
| } | ||
|
|
||
| // TODO(n3wscott): Send the logger to the controllers. | ||
| // Build all of our controllers, with the clients constructed above. | ||
| controllers := make([]controller.Interface, 0, len(ctors)) | ||
| for _, ctor := range ctors { | ||
|
|
@@ -118,7 +147,7 @@ func main() { | |
| // We don't expect this to return until stop is called, | ||
| // but if it does, propagate it back. | ||
| if err := ctrlr.Run(threadsPerController, stopCh); err != nil { | ||
| glog.Fatalf("Error running controller: %s", err.Error()) | ||
| logger.Fatalf("Error running controller: %s", err.Error()) | ||
| } | ||
| }(ctrlr) | ||
| } | ||
|
|
@@ -127,9 +156,9 @@ func main() { | |
| srv := &http.Server{Addr: metricsScrapeAddr} | ||
| http.Handle(metricsScrapePath, promhttp.Handler()) | ||
| go func() { | ||
| glog.Info("Starting metrics listener at %s", metricsScrapeAddr) | ||
| logger.Info("Starting metrics listener at %s", metricsScrapeAddr) | ||
| if err := srv.ListenAndServe(); err != nil { | ||
| glog.Infof("Httpserver: ListenAndServe() finished with error: %s", err) | ||
| logger.Infof("Httpserver: ListenAndServe() finished with error: %s", err) | ||
| } | ||
| }() | ||
|
|
||
|
|
@@ -139,8 +168,6 @@ func main() { | |
| ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) | ||
| defer cancel() | ||
| srv.Shutdown(ctx) | ||
|
|
||
| glog.Flush() | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Hah, I think you caught a bug here. Nice!
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 💃 |
||
| } | ||
|
|
||
| func init() { | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -33,3 +33,10 @@ spec: | |
| "-logtostderr", | ||
| "-stderrthreshold", "INFO", | ||
| ] | ||
| volumeMounts: | ||
| - name: config-logging | ||
| mountPath: /etc/config-logging | ||
| volumes: | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It looks like we're both mounting a ConfigMap, and using the k8s watcher on the ConfigMap object (because you're feeding in a k8s client). We should just be able to use inotify on the file: https://github.com/fsnotify/fsnotify looks promising.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The autoscaling group found that
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. do tell more!
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I vote make this an issue and do a followup pass. Having loggers is more valuable than optimizing, I think.
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, this was intended to be a followup, not a blocker. |
||
| - name: config-logging | ||
| configMap: | ||
| name: config-logging | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Not an issue w/ your PR, but we should consider renaming these binaries slightly; 'controller' runs the bus controllers and 'controller-manager' runs the flow, feed, etc controllers.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I'll create an issue for this.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I have some code that combines these into a single deployment, but haven't PR'd it: master...grantr:combine-controllers
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
#313