diff --git a/Gopkg.lock b/Gopkg.lock
index afa868264976..4292dbaabebe 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -952,6 +952,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "e0cca64b11ad0438011753c10c7b31158051dfbef454f31c5d00c3c5ee1d70d2"
+ inputs-digest = "621e4f5bec0b084d127193a1a2ec95460b8368224d81f7c2611900a91c1bbb12"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/cmd/autoscaler/BUILD.bazel b/cmd/autoscaler/BUILD.bazel
index dd095f10b9ba..21e3d1934e46 100644
--- a/cmd/autoscaler/BUILD.bazel
+++ b/cmd/autoscaler/BUILD.bazel
@@ -6,14 +6,17 @@ go_library(
importpath = "github.com/knative/serving/cmd/autoscaler",
visibility = ["//visibility:private"],
deps = [
+ "//cmd/util:go_default_library",
"//pkg/apis/serving/v1alpha1:go_default_library",
"//pkg/autoscaler:go_default_library",
"//pkg/client/clientset/versioned:go_default_library",
- "//vendor/github.com/golang/glog:go_default_library",
+ "//pkg/logging:go_default_library",
+ "//pkg/logging/logkey:go_default_library",
"//vendor/github.com/gorilla/websocket:go_default_library",
"//vendor/github.com/josephburnett/k8sflag/pkg/k8sflag:go_default_library",
"//vendor/go.opencensus.io/exporter/prometheus:go_default_library",
"//vendor/go.opencensus.io/stats/view:go_default_library",
+ "//vendor/go.uber.org/zap:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
diff --git a/cmd/autoscaler/main.go b/cmd/autoscaler/main.go
index b1a7e6df9a39..c3b696cf8462 100644
--- a/cmd/autoscaler/main.go
+++ b/cmd/autoscaler/main.go
@@ -17,22 +17,24 @@ package main
import (
"bytes"
+ "context"
"encoding/gob"
"flag"
- "log"
"net/http"
- "os"
"time"
"go.opencensus.io/exporter/prometheus"
"go.opencensus.io/stats/view"
+ "go.uber.org/zap"
- "github.com/josephburnett/k8sflag/pkg/k8sflag"
+ "github.com/knative/serving/cmd/util"
"github.com/knative/serving/pkg/apis/serving/v1alpha1"
ela_autoscaler "github.com/knative/serving/pkg/autoscaler"
clientset "github.com/knative/serving/pkg/client/clientset/versioned"
+ "github.com/knative/serving/pkg/logging"
+ "github.com/knative/serving/pkg/logging/logkey"
+ "github.com/josephburnett/k8sflag/pkg/k8sflag"
- "github.com/golang/glog"
"github.com/gorilla/websocket"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -64,46 +66,24 @@ var (
elaConfig string
elaRevision string
elaAutoscalerPort string
+ logger *zap.SugaredLogger
// Revision-level configuration
concurrencyModel = flag.String("concurrencyModel", string(v1alpha1.RevisionRequestConcurrencyModelMulti), "")
// Cluster-level configuration
- enableScaleToZero = k8sflag.Bool("autoscale.enable-scale-to-zero", false)
- multiConcurrencyTarget = k8sflag.Float64("autoscale.multi-concurrency-target", 0.0, k8sflag.Required)
- singleConcurrencyTarget = k8sflag.Float64("autoscale.single-concurrency-target", 0.0, k8sflag.Required)
+ autoscaleFlagSet = k8sflag.NewFlagSet("/etc/config-autoscaler")
+ enableScaleToZero = autoscaleFlagSet.Bool("enable-scale-to-zero", false)
+ multiConcurrencyTarget = autoscaleFlagSet.Float64("multi-concurrency-target", 0.0, k8sflag.Required)
+ singleConcurrencyTarget = autoscaleFlagSet.Float64("single-concurrency-target", 0.0, k8sflag.Required)
)
-func init() {
- elaNamespace = os.Getenv("ELA_NAMESPACE")
- if elaNamespace == "" {
- glog.Fatal("No ELA_NAMESPACE provided.")
- }
- glog.Infof("ELA_NAMESPACE=%v", elaNamespace)
-
- elaDeployment = os.Getenv("ELA_DEPLOYMENT")
- if elaDeployment == "" {
- glog.Fatal("No ELA_DEPLOYMENT provided.")
- }
- glog.Infof("ELA_DEPLOYMENT=%v", elaDeployment)
-
- elaConfig = os.Getenv("ELA_CONFIGURATION")
- if elaConfig == "" {
- glog.Fatal("No ELA_CONFIGURATION provided.")
- }
- glog.Infof("ELA_CONFIGURATION=%v", elaConfig)
-
- elaRevision = os.Getenv("ELA_REVISION")
- if elaRevision == "" {
- glog.Fatal("No ELA_REVISION provided.")
- }
- glog.Infof("ELA_REVISION=%v", elaRevision)
-
- elaAutoscalerPort = os.Getenv("ELA_AUTOSCALER_PORT")
- if elaAutoscalerPort == "" {
- glog.Fatal("No ELA_AUTOSCALER_PORT provided.")
- }
- glog.Infof("ELA_AUTOSCALER_PORT=%v", elaAutoscalerPort)
+func initEnv() {
+ elaNamespace = util.GetRequiredEnvOrFatal("ELA_NAMESPACE", logger)
+ elaDeployment = util.GetRequiredEnvOrFatal("ELA_DEPLOYMENT", logger)
+ elaConfig = util.GetRequiredEnvOrFatal("ELA_CONFIGURATION", logger)
+ elaRevision = util.GetRequiredEnvOrFatal("ELA_REVISION", logger)
+ elaAutoscalerPort = util.GetRequiredEnvOrFatal("ELA_AUTOSCALER_PORT", logger)
}
func autoscaler() {
@@ -114,22 +94,23 @@ func autoscaler() {
case string(v1alpha1.RevisionRequestConcurrencyModelMulti):
targetConcurrency = multiConcurrencyTarget
default:
- log.Fatalf("Unrecognized concurrency model: " + *concurrencyModel)
+		logger.Fatalf("Unrecognized concurrency model: %s", *concurrencyModel)
}
config := ela_autoscaler.Config{
TargetConcurrency: targetConcurrency,
- MaxScaleUpRate: k8sflag.Float64("autoscale.max-scale-up-rate", 0.0, k8sflag.Required),
- StableWindow: k8sflag.Duration("autoscale.stable-window", nil, k8sflag.Required),
- PanicWindow: k8sflag.Duration("autoscale.panic-window", nil, k8sflag.Required),
- ScaleToZeroThreshold: k8sflag.Duration("autoscale.scale-to-zero-threshold", nil, k8sflag.Required, k8sflag.Dynamic),
+ MaxScaleUpRate: autoscaleFlagSet.Float64("max-scale-up-rate", 0.0, k8sflag.Required),
+ StableWindow: autoscaleFlagSet.Duration("stable-window", nil, k8sflag.Required),
+ PanicWindow: autoscaleFlagSet.Duration("panic-window", nil, k8sflag.Required),
+ ScaleToZeroThreshold: autoscaleFlagSet.Duration("scale-to-zero-threshold", nil, k8sflag.Required, k8sflag.Dynamic),
}
a := ela_autoscaler.NewAutoscaler(config, statsReporter)
ticker := time.NewTicker(2 * time.Second)
+ ctx := logging.WithLogger(context.TODO(), logger)
for {
select {
case <-ticker.C:
- scale, ok := a.Scale(time.Now())
+ scale, ok := a.Scale(ctx, time.Now())
if ok {
// Flag guard scale to zero.
if !enableScaleToZero.Get() && scale == 0 {
@@ -144,7 +125,7 @@ func autoscaler() {
}
}
case s := <-statChan:
- a.Record(s)
+ a.Record(ctx, s)
}
}
}
@@ -161,7 +142,7 @@ func scaleSerializer() {
for {
select {
case p := <-scaleChan:
- glog.Warning("Scaling is not keeping up with autoscaling requests.")
+			logger.Warn("Scaling is not keeping up with autoscaling requests.")
desiredPodCount = p
default:
break FastForward
@@ -176,10 +157,10 @@ func scaleTo(podCount int32) {
dc := kubeClient.ExtensionsV1beta1().Deployments(elaNamespace)
deployment, err := dc.Get(elaDeployment, metav1.GetOptions{})
if err != nil {
- glog.Error("Error getting Deployment %q: %s", elaDeployment, err)
+		logger.Errorf("Error getting Deployment %q: %s", elaDeployment, err)
return
}
- glog.Infof("===SCALE=== %v %v %v %v %v",
+ logger.Debugf("===SCALE=== %v %v %v %v %v",
time.Now().Unix(),
podCount,
deployment.Status.Replicas,
@@ -193,33 +174,33 @@ func scaleTo(podCount int32) {
return
}
- glog.Infof("Scaling to %v", podCount)
+ logger.Infof("Scaling to %v", podCount)
if podCount == 0 {
revisionClient := elaClient.ServingV1alpha1().Revisions(elaNamespace)
revision, err := revisionClient.Get(elaRevision, metav1.GetOptions{})
if err != nil {
- glog.Errorf("Error getting Revision %q: %s", elaRevision, err)
+			logger.Errorf("Error getting Revision %q: %s", elaRevision, err)
}
revision.Spec.ServingState = v1alpha1.RevisionServingStateReserve
revision, err = revisionClient.Update(revision)
if err != nil {
- glog.Errorf("Error updating Revision %q: %s", elaRevision, err)
+			logger.Errorf("Error updating Revision %q: %s", elaRevision, err)
}
}
deployment.Spec.Replicas = &podCount
_, err = dc.Update(deployment)
if err != nil {
- glog.Errorf("Error updating Deployment %q: %s", elaDeployment, err)
+ logger.Errorf("Error updating Deployment %q: %s", elaDeployment, err)
}
- glog.Info("Successfully scaled.")
+ logger.Info("Successfully scaled.")
}
func handler(w http.ResponseWriter, r *http.Request) {
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
- glog.Error(err)
+ logger.Error("Failed to upgrade http connection to websocket", zap.Error(err))
return
}
for {
@@ -228,14 +209,14 @@ func handler(w http.ResponseWriter, r *http.Request) {
return
}
if messageType != websocket.BinaryMessage {
- glog.Error("Dropping non-binary message.")
+ logger.Error("Dropping non-binary message.")
continue
}
dec := gob.NewDecoder(bytes.NewBuffer(msg))
var stat ela_autoscaler.Stat
err = dec.Decode(&stat)
if err != nil {
- glog.Error(err)
+ logger.Error("Failed to decode stats", zap.Error(err))
continue
}
statChan <- stat
@@ -244,33 +225,42 @@ func handler(w http.ResponseWriter, r *http.Request) {
func main() {
flag.Parse()
- glog.Info("Autoscaler up")
+ logger = logging.NewLoggerFromDefaultConfigMap("loglevel.autoscaler").Named("ela-autoscaler")
+ defer logger.Sync()
+
+ initEnv()
+ logger = logger.With(
+ zap.String(logkey.Namespace, elaNamespace),
+ zap.String(logkey.Configuration, elaConfig),
+ zap.String(logkey.Revision, elaRevision))
+
+ logger.Info("Starting autoscaler")
config, err := rest.InClusterConfig()
if err != nil {
- glog.Fatal(err)
+ logger.Fatal("Failed to get in cluster configuration", zap.Error(err))
}
config.Timeout = time.Duration(5 * time.Second)
kc, err := kubernetes.NewForConfig(config)
if err != nil {
- glog.Fatal(err)
+ logger.Fatal("Failed to create a new clientset", zap.Error(err))
}
kubeClient = kc
ec, err := clientset.NewForConfig(config)
if err != nil {
- glog.Fatal(err)
+		logger.Fatal("Failed to create a new serving clientset", zap.Error(err))
}
elaClient = ec
exporter, err := prometheus.NewExporter(prometheus.Options{Namespace: "autoscaler"})
if err != nil {
- glog.Fatal(err)
+ logger.Fatal("Failed to create prometheus exporter", zap.Error(err))
}
view.RegisterExporter(exporter)
view.SetReportingPeriod(1 * time.Second)
reporter, err := ela_autoscaler.NewStatsReporter(elaNamespace, elaConfig, elaRevision)
if err != nil {
- glog.Fatal(err)
+ logger.Fatal("Failed to create stats reporter", zap.Error(err))
}
statsReporter = reporter
diff --git a/cmd/controller/main.go b/cmd/controller/main.go
index c29950729e7d..cf1af84cd717 100644
--- a/cmd/controller/main.go
+++ b/cmd/controller/main.go
@@ -57,20 +57,25 @@ var (
queueSidecarImage string
autoscalerImage string
- autoscaleConcurrencyQuantumOfTime = k8sflag.Duration("autoscale.concurrency-quantum-of-time", nil, k8sflag.Required)
- autoscaleEnableScaleToZero = k8sflag.Bool("autoscale.enable-scale-to-zero", false)
- autoscaleEnableSingleConcurrency = k8sflag.Bool("autoscale.enable-single-concurrency", false)
-
- loggingEnableVarLogCollection = k8sflag.Bool("logging.enable-var-log-collection", false)
- loggingFluentSidecarImage = k8sflag.String("logging.fluentd-sidecar-image", "")
- loggingFluentSidecarOutputConfig = k8sflag.String("logging.fluentd-sidecar-output-config", "")
- loggingURLTemplate = k8sflag.String("logging.revision-url-template", "")
- loggingZapCfg = k8sflag.String("logging.ela-controller.zap-config", "")
+ autoscaleFlagSet = k8sflag.NewFlagSet("/etc/config-autoscaler")
+ autoscaleConcurrencyQuantumOfTime = autoscaleFlagSet.Duration("concurrency-quantum-of-time", nil, k8sflag.Required)
+ autoscaleEnableScaleToZero = autoscaleFlagSet.Bool("enable-scale-to-zero", false)
+ autoscaleEnableSingleConcurrency = autoscaleFlagSet.Bool("enable-single-concurrency", false)
+
+ observabilityFlagSet = k8sflag.NewFlagSet("/etc/config-observability")
+ loggingEnableVarLogCollection = observabilityFlagSet.Bool("logging.enable-var-log-collection", false)
+ loggingFluentSidecarImage = observabilityFlagSet.String("logging.fluentd-sidecar-image", "")
+ loggingFluentSidecarOutputConfig = observabilityFlagSet.String("logging.fluentd-sidecar-output-config", "")
+ loggingURLTemplate = observabilityFlagSet.String("logging.revision-url-template", "")
+
+ loggingFlagSet = k8sflag.NewFlagSet("/etc/config-logging")
+ zapConfig = loggingFlagSet.String("zap-logger-config", "")
+ queueProxyLoggingLevel = loggingFlagSet.String("loglevel.queueproxy", "")
)
func main() {
flag.Parse()
- logger := logging.NewLogger(loggingZapCfg.Get()).Named("ela-controller")
+ logger := logging.NewLoggerFromDefaultConfigMap("loglevel.controller").Named("ela-controller")
defer logger.Sync()
if loggingEnableVarLogCollection.Get() {
@@ -139,6 +144,9 @@ func main() {
FluentdSidecarImage: loggingFluentSidecarImage.Get(),
FluentdSidecarOutputConfig: loggingFluentSidecarOutputConfig.Get(),
LoggingURLTemplate: loggingURLTemplate.Get(),
+
+ QueueProxyLoggingConfig: zapConfig.Get(),
+ QueueProxyLoggingLevel: queueProxyLoggingLevel.Get(),
}
// Build all of our controllers, with the clients constructed above.
diff --git a/cmd/queue/BUILD.bazel b/cmd/queue/BUILD.bazel
index 45e85efca770..e62559ffbb8a 100644
--- a/cmd/queue/BUILD.bazel
+++ b/cmd/queue/BUILD.bazel
@@ -6,11 +6,14 @@ go_library(
importpath = "github.com/knative/serving/cmd/queue",
visibility = ["//visibility:private"],
deps = [
+ "//cmd/util:go_default_library",
"//pkg/apis/serving/v1alpha1:go_default_library",
"//pkg/autoscaler:go_default_library",
+ "//pkg/logging:go_default_library",
+ "//pkg/logging/logkey:go_default_library",
"//pkg/queue:go_default_library",
- "//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/gorilla/websocket:go_default_library",
+ "//vendor/go.uber.org/zap:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
],
diff --git a/cmd/queue/main.go b/cmd/queue/main.go
index 31615343e16f..14ce64f2e68f 100644
--- a/cmd/queue/main.go
+++ b/cmd/queue/main.go
@@ -32,11 +32,14 @@ import (
"syscall"
"time"
+ "github.com/knative/serving/cmd/util"
"github.com/knative/serving/pkg/apis/serving/v1alpha1"
"github.com/knative/serving/pkg/autoscaler"
+ "github.com/knative/serving/pkg/logging"
+ "github.com/knative/serving/pkg/logging/logkey"
"github.com/knative/serving/pkg/queue"
+ "go.uber.org/zap"
- "github.com/golang/glog"
"github.com/gorilla/websocket"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@@ -64,6 +67,8 @@ const (
var (
podName string
+ elaNamespace string
+ elaConfiguration string
elaRevision string
elaAutoscaler string
elaAutoscalerPort string
@@ -73,47 +78,25 @@ var (
kubeClient *kubernetes.Clientset
statSink *websocket.Conn
proxy *httputil.ReverseProxy
+ logger *zap.SugaredLogger
concurrencyQuantumOfTime = flag.Duration("concurrencyQuantumOfTime", 100*time.Millisecond, "")
concurrencyModel = flag.String("concurrencyModel", string(v1alpha1.RevisionRequestConcurrencyModelMulti), "")
singleConcurrencyBreaker = queue.NewBreaker(singleConcurrencyQueueDepth, 1)
)
-func init() {
- podName = os.Getenv("ELA_POD")
- if podName == "" {
- glog.Fatal("No ELA_POD provided.")
- }
- glog.Infof("ELA_POD=%v", podName)
-
- elaRevision = os.Getenv("ELA_REVISION")
- if elaRevision == "" {
- glog.Fatal("No ELA_REVISION provided.")
- }
- glog.Infof("ELA_REVISION=%v", elaRevision)
-
- elaAutoscaler = os.Getenv("ELA_AUTOSCALER")
- if elaAutoscaler == "" {
- glog.Fatal("No ELA_AUTOSCALER provided.")
- }
- glog.Infof("ELA_AUTOSCALER=%v", elaRevision)
-
- elaAutoscalerPort = os.Getenv("ELA_AUTOSCALER_PORT")
- if elaAutoscalerPort == "" {
- glog.Fatal("No ELA_AUTOSCALER_PORT provided.")
- }
- glog.Infof("ELA_AUTOSCALER_PORT=%v", elaAutoscalerPort)
-
- target, err := url.Parse("http://localhost:8080")
- if err != nil {
- glog.Fatal(err)
- }
- proxy = httputil.NewSingleHostReverseProxy(target)
+func initEnv() {
+ podName = util.GetRequiredEnvOrFatal("ELA_POD", logger)
+ elaNamespace = util.GetRequiredEnvOrFatal("ELA_NAMESPACE", logger)
+ elaConfiguration = util.GetRequiredEnvOrFatal("ELA_CONFIGURATION", logger)
+ elaRevision = util.GetRequiredEnvOrFatal("ELA_REVISION", logger)
+ elaAutoscaler = util.GetRequiredEnvOrFatal("ELA_AUTOSCALER", logger)
+ elaAutoscalerPort = util.GetRequiredEnvOrFatal("ELA_AUTOSCALER_PORT", logger)
}
func connectStatSink() {
autoscalerEndpoint := fmt.Sprintf("ws://%s.%s.svc.cluster.local:%s",
elaAutoscaler, queue.AutoscalerNamespace, elaAutoscalerPort)
- glog.Infof("Connecting to autoscaler at %s.", autoscalerEndpoint)
+ logger.Infof("Connecting to autoscaler at %s.", autoscalerEndpoint)
for {
// Everything is coming up at the same time. We wait a
// second first to let the autoscaler start serving. And
@@ -126,13 +109,12 @@ func connectStatSink() {
}
conn, _, err := dialer.Dial(autoscalerEndpoint, nil)
if err != nil {
- glog.Error(err)
+ logger.Error("Retrying connection to autoscaler.", zap.Error(err))
} else {
- glog.Info("Connected to stat sink.")
+ logger.Info("Connected to stat sink.")
statSink = conn
return
}
- glog.Error("Retrying connection to autoscaler.")
}
}
@@ -140,21 +122,20 @@ func statReporter() {
for {
s := <-statChan
if statSink == nil {
- glog.Error("Stat sink not connected.")
+ logger.Error("Stat sink not connected.")
continue
}
var b bytes.Buffer
enc := gob.NewEncoder(&b)
err := enc.Encode(s)
if err != nil {
- glog.Error(err)
+ logger.Error("Failed to encode data from stats channel", zap.Error(err))
continue
}
err = statSink.WriteMessage(websocket.BinaryMessage, b.Bytes())
if err != nil {
- glog.Error(err)
+ logger.Error("Failed to write to stat sink. Attempting to reconnect to stat sink.", zap.Error(err))
statSink = nil
- glog.Error("Attempting reconnection to stat sink.")
go connectStatSink()
continue
}
@@ -258,17 +239,30 @@ func setupAdminHandlers(server *http.Server) {
}
func main() {
- // Even though we have no flags, glog has some hence requiring
- // flag.Parse().
- flag.Parse()
- glog.Info("Queue container is running")
+ logger = logging.NewLogger(os.Getenv("ELA_LOGGING_CONFIG"), os.Getenv("ELA_LOGGING_LEVEL")).Named("ela-queueproxy")
+ defer logger.Sync()
+
+ initEnv()
+ logger = logger.With(
+ zap.String(logkey.Namespace, elaNamespace),
+ zap.String(logkey.Configuration, elaConfiguration),
+ zap.String(logkey.Revision, elaRevision),
+ zap.String(logkey.Pod, podName))
+
+ target, err := url.Parse("http://localhost:8080")
+ if err != nil {
+ logger.Fatal("Failed to parse localhost url", zap.Error(err))
+ }
+ proxy = httputil.NewSingleHostReverseProxy(target)
+
+ logger.Info("Queue container is starting")
config, err := rest.InClusterConfig()
if err != nil {
- glog.Fatalf("Error getting in cluster config: %v", err)
+ logger.Fatal("Error getting in cluster config", zap.Error(err))
}
kc, err := kubernetes.NewForConfig(config)
if err != nil {
- glog.Fatalf("Error creating new config: %v", err)
+ logger.Fatal("Error creating new config", zap.Error(err))
}
kubeClient = kc
go connectStatSink()
diff --git a/cmd/util/BUILD.bazel b/cmd/util/BUILD.bazel
new file mode 100644
index 000000000000..5634d1180ed5
--- /dev/null
+++ b/cmd/util/BUILD.bazel
@@ -0,0 +1,9 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+ name = "go_default_library",
+ srcs = ["env.go"],
+ importpath = "github.com/knative/serving/cmd/util",
+ visibility = ["//visibility:public"],
+ deps = ["//vendor/go.uber.org/zap:go_default_library"],
+)
diff --git a/cmd/util/env.go b/cmd/util/env.go
new file mode 100644
index 000000000000..f948aa32392a
--- /dev/null
+++ b/cmd/util/env.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2018 Google Inc. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+ "os"
+
+ "go.uber.org/zap"
+)
+
+// GetRequiredEnvOrFatal tries to get the value of an environment variable.
+// If the value is empty, it logs the failure at fatal level and exits the process.
+func GetRequiredEnvOrFatal(key string, logger *zap.SugaredLogger) string {
+ value := os.Getenv(key)
+ if value == "" {
+ logger.Fatalf("No %v provided", key)
+ }
+ logger.Infof("%v=%v", key, value)
+ return value
+}
diff --git a/cmd/webhook/BUILD.bazel b/cmd/webhook/BUILD.bazel
index 7c8cb7f749ae..a0e21b294963 100644
--- a/cmd/webhook/BUILD.bazel
+++ b/cmd/webhook/BUILD.bazel
@@ -9,7 +9,6 @@ go_library(
"//pkg/logging:go_default_library",
"//pkg/signals:go_default_library",
"//pkg/webhook:go_default_library",
- "//vendor/github.com/josephburnett/k8sflag/pkg/k8sflag:go_default_library",
"//vendor/go.uber.org/zap:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go
index 4fb7bbd9fbdb..4dd5d7a535d7 100644
--- a/cmd/webhook/main.go
+++ b/cmd/webhook/main.go
@@ -23,7 +23,6 @@ import (
"github.com/knative/serving/pkg/logging"
"github.com/knative/serving/pkg/signals"
"github.com/knative/serving/pkg/webhook"
- "github.com/josephburnett/k8sflag/pkg/k8sflag"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@@ -31,8 +30,7 @@ import (
func main() {
flag.Parse()
- loggingZapCfg := k8sflag.String("logging.zap-config", "")
- logger := logging.NewLogger(loggingZapCfg.Get()).Named("ela-webhook")
+ logger := logging.NewLoggerFromDefaultConfigMap("loglevel.webhook").Named("ela-webhook")
defer logger.Sync()
logger.Info("Starting the Configuration Webhook")
diff --git a/config/BUILD.bazel b/config/BUILD.bazel
index b7bbe2577eaa..94c6c5c3e698 100644
--- a/config/BUILD.bazel
+++ b/config/BUILD.bazel
@@ -3,13 +3,23 @@ package(default_visibility = ["//visibility:public"])
load("@k8s_object//:defaults.bzl", "k8s_object")
k8s_object(
- name = "elaconfig",
- template = "elaconfig.yaml",
+ name = "config-domain",
+ template = "config-domain.yaml",
)
k8s_object(
- name = "elawebhookconfig",
- template = "elawebhookconfig.yaml",
+ name = "config-autoscaler",
+ template = "config-autoscaler.yaml",
+)
+
+k8s_object(
+ name = "config-logging",
+ template = "config-logging.yaml",
+)
+
+k8s_object(
+ name = "config-observability",
+ template = "config-observability.yaml",
)
k8s_object(
@@ -114,8 +124,10 @@ k8s_objects(
":namespace",
":authz",
":crds",
- ":elaconfig",
- ":elawebhookconfig",
+ ":config-domain",
+ ":config-autoscaler",
+ ":config-logging",
+ ":config-observability",
":controller",
":controllerservice",
":webhook",
diff --git a/config/config-autoscaler.yaml b/config/config-autoscaler.yaml
new file mode 100644
index 000000000000..7408eeb12a60
--- /dev/null
+++ b/config/config-autoscaler.yaml
@@ -0,0 +1,59 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: config-autoscaler
+ namespace: ela-system
+data:
+ # Static parameters:
+
+ # Target concurrency is the desired number of concurrent requests for
+ # each pod. This is the primary knob for fast autoscaling which will
+  # try to achieve a concurrency per pod of the target
+ # concurrency. Single-concurrency must target a value close to 1.0.
+ multi-concurrency-target: "1.0"
+ single-concurrency-target: "0.9"
+
+ # When operating in a stable mode, the autoscaler operates on the
+ # average concurrency over the stable window.
+ stable-window: "60s"
+
+ # When observed average concurrency during the panic window reaches 2x
+ # the target concurrency, the autoscaler enters panic mode. When
+ # operating in panic mode, the autoscaler operates on the average
+ # concurrency over the panic window.
+ panic-window: "6s"
+
+ # Max scale up rate limits the rate at which the autoscaler will
+ # increase pod count. It is the maximum ratio of desired pods versus
+ # observed pods.
+ max-scale-up-rate: "10"
+
+ # Concurrency quantum of time is the minimum time a request consumes
+ # for the purpose of reporting concurrency metrics. The number of
+ # quantums of time that fit into 1 second determines the max QPS the
+ # pod will handle. Requests can span multiple quantums of time in
+ # which case the request duration is rounded up.
+ concurrency-quantum-of-time: "100ms"
+
+ # Scale to zero feature flag
+ enable-scale-to-zero: "false"
+
+ # Dynamic parameters (take effect when config map is updated):
+
+ # Scale to zero threshold is the time a revision must be idle before
+ # it is scaled to zero.
+ scale-to-zero-threshold: "5m"
diff --git a/config/config-domain.yaml b/config/config-domain.yaml
new file mode 100644
index 000000000000..780e45e60e0c
--- /dev/null
+++ b/config/config-domain.yaml
@@ -0,0 +1,29 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: config-domain
+ namespace: ela-system
+data:
+ # These are example settings of domain.
+ # prod-domain.com will be used for routes having app=prod.
+ prod-domain.com: |
+ selector:
+ app: prod
+  # Default value for domain, for routes that do not have app=prod labels.
+ # Although it will match all routes, it is the least-specific rule so it
+ # will only be used if no other domain matches.
+ demo-domain.com: |
diff --git a/config/elawebhookconfig.yaml b/config/config-logging.yaml
similarity index 83%
rename from config/elawebhookconfig.yaml
rename to config/config-logging.yaml
index 1bbad46e0363..3aef50bc6c8a 100644
--- a/config/elawebhookconfig.yaml
+++ b/config/config-logging.yaml
@@ -15,11 +15,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
- name: ela-webhook-config
+ name: config-logging
namespace: ela-system
data:
- # Logging configuration
- logging.zap-config: |
+  # Common configuration for the entire Elafros codebase
+ zap-logger-config: |
{
"level": "info",
"development": false,
@@ -44,3 +44,9 @@ data:
"callerEncoder": ""
}
}
+
+ # Log level overrides
+ loglevel.controller: "info"
+ loglevel.autoscaler: "info"
+ loglevel.queueproxy: "info"
+ loglevel.webhook: "info"
diff --git a/config/config-observability.yaml b/config/config-observability.yaml
new file mode 100644
index 000000000000..092ceca76003
--- /dev/null
+++ b/config/config-observability.yaml
@@ -0,0 +1,62 @@
+# Copyright 2018 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: config-observability
+ namespace: ela-system
+data:
+ # LOGGING CONFIGURATION
+
+ # Static parameters:
+
+ # A fluentd sidecar will be set up to collect var log if this flag is true.
+ logging.enable-var-log-collection: "true"
+
+ # The fluentd sidecar image used to collect logs from /var/log as a sidecar.
+ # Must be presented if logging.enable-var-log-collection is true.
+ logging.fluentd-sidecar-image: "k8s.gcr.io/fluentd-elasticsearch:v2.0.4"
+
+ # The fluentd sidecar output config to specify logging destination.
+ logging.fluentd-sidecar-output-config: |
+
+ @id elasticsearch
+ @type elasticsearch
+ @log_level info
+ include_tag_key true
+ # Elasticsearch service is in monitoring namespace.
+ host elasticsearch-logging.monitoring
+ port 9200
+ logstash_format true
+
+ @type file
+ path /var/log/fluentd-buffers/kubernetes.system.buffer
+ flush_mode interval
+ retry_type exponential_backoff
+ flush_thread_count 2
+ flush_interval 5s
+ retry_forever
+ retry_max_interval 30
+ chunk_limit_size 2M
+ queue_limit_length 8
+ overflow_action block
+
+
+
+ # The revision log url template, where ${REVISION_UID} will be replaced by the actual
+ # revision uid. For the url to work you'll need to set up monitoring and have access to
+ # the kibana dashboard using `kubectl proxy`.
+ logging.revision-url-template: |
+ http://localhost:8001/api/v1/namespaces/monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))
diff --git a/config/controller.yaml b/config/controller.yaml
index ba0c19d047d1..ff8c005512db 100644
--- a/config/controller.yaml
+++ b/config/controller.yaml
@@ -43,9 +43,19 @@ spec:
- name: metrics
containerPort: 9090
volumeMounts:
- - name: ela-config
- mountPath: /etc/config
+ - name: config-autoscaler
+ mountPath: /etc/config-autoscaler
+ - name: config-logging
+ mountPath: /etc/config-logging
+ - name: config-observability
+ mountPath: /etc/config-observability
volumes:
- - name: ela-config
+ - name: config-autoscaler
configMap:
- name: ela-config
+ name: config-autoscaler
+ - name: config-logging
+ configMap:
+ name: config-logging
+ - name: config-observability
+ configMap:
+ name: config-observability
diff --git a/config/elaconfig.yaml b/config/elaconfig.yaml
deleted file mode 100644
index a239b9f8877c..000000000000
--- a/config/elaconfig.yaml
+++ /dev/null
@@ -1,142 +0,0 @@
-# Copyright 2018 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-apiVersion: v1
-kind: ConfigMap
-metadata:
- name: ela-config
- namespace: ela-system
-data:
- # These are example settings of domain.
- # prod-domain.com will be used for routes having app=prod.
- prod-domain.com: |
- selector:
- app: prod
- # Default value for domain, for routes that does not have app=prod labels.
- # Although it will match all routes, it is the least-specific rule so it
- # will only be used if no other domain matches.
- demo-domain.com: |
-
- # AUTOSCALER CONFIGURATION
-
- # Static parameters:
-
- # Target concurrency is the desired number of concurrent requests for
- # each pod. This is the primary knob for fast autoscaling which will
- # try achieve an concurrency per pod of the target
- # concurrency. Single-concurrency must target a value close to 1.0.
- autoscale.multi-concurrency-target: "1.0"
- autoscale.single-concurrency-target: "0.9"
-
- # When operating in a stable mode, the autoscaler operates on the
- # average concurrency over the stable window.
- autoscale.stable-window: "60s"
-
- # When observed average concurrency during the panic window reaches 2x
- # the target concurrency, the autoscaler enters panic mode. When
- # operating in panic mode, the autoscaler operates on the average
- # concurrency over the panic window.
- autoscale.panic-window: "6s"
-
- # Max scale up rate limits the rate at which the autoscaler will
- # increase pod count. It is the maximum ratio of desired pods versus
- # observed pods.
- autoscale.max-scale-up-rate: "10"
-
- # Concurrency quantum of time is the minimum time a request consumes
- # for the purpose of reporting concurrency metrics. The number of
- # quantums of time that fit into 1 second determines the max QPS the
- # pod will handle. Requests can span multiple quantums of time in
- # which case the request duration is rounded up.
- autoscale.concurrency-quantum-of-time: "100ms"
-
- # Scale to zero feature flag
- autoscale.enable-scale-to-zero: "false"
-
- # Dynamic parameters (take effect when config map is updated):
-
- # Scale to zero threshold is the time a revision must be idle before
- # it is scaled to zero.
- autoscale.scale-to-zero-threshold: "5m"
-
-
- # LOGGING CONFIGURATION
-
- # Static parameters:
-
- # A fluentd sidecar will be set up to collect var log if this flag is true.
- logging.enable-var-log-collection: "true"
-
- # The fluentd sidecar image used to collect logs from /var/log as a sidecar.
- # Must be presented if logging.enable-var-log-collection is true.
- logging.fluentd-sidecar-image: "k8s.gcr.io/fluentd-elasticsearch:v2.0.4"
-
- # The fluentd sidecar output config to specify logging destination.
- logging.fluentd-sidecar-output-config: |
-    <match **>
- @id elasticsearch
- @type elasticsearch
- @log_level info
- include_tag_key true
- # Elasticsearch service is in monitoring namespace.
- host elasticsearch-logging.monitoring
- port 9200
- logstash_format true
-      <buffer>
- @type file
- path /var/log/fluentd-buffers/kubernetes.system.buffer
- flush_mode interval
- retry_type exponential_backoff
- flush_thread_count 2
- flush_interval 5s
- retry_forever
- retry_max_interval 30
- chunk_limit_size 2M
- queue_limit_length 8
- overflow_action block
-      </buffer>
-    </match>
-
- # The revision log url template, where ${REVISION_UID} will be replaced by the actual
- # revision uid. For the url to work you'll need to set up monitoring and have access to
- # the kibana dashboard using `kubectl proxy`.
- logging.revision-url-template: |
- http://localhost:8001/api/v1/namespaces/monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))
-
- # Configuration for Elafros controllers
- logging.ela-controller.zap-config: |
- {
- "level": "info",
- "development": false,
- "sampling": {
- "initial": 100,
- "thereafter": 100
- },
- "outputPaths": ["stdout"],
- "errorOutputPaths": ["stderr"],
- "encoding": "json",
- "encoderConfig": {
- "timeKey": "",
- "levelKey": "level",
- "nameKey": "logger",
- "callerKey": "caller",
- "messageKey": "msg",
- "stacktraceKey": "stacktrace",
- "lineEnding": "",
- "levelEncoder": "",
- "timeEncoder": "",
- "durationEncoder": "",
- "callerEncoder": ""
- }
- }
diff --git a/config/monitoring/150-dev/fluentd-configmap-dev.yaml b/config/monitoring/150-dev/fluentd-configmap-dev.yaml
index 970d9d998777..179dce2f91e2 100644
--- a/config/monitoring/150-dev/fluentd-configmap-dev.yaml
+++ b/config/monitoring/150-dev/fluentd-configmap-dev.yaml
@@ -28,8 +28,7 @@ data:
@id fluentd-containers.log
@type tail
- path /var/log/containers/*.log
- exclude_path ["/var/log/containers/istio-*.log", "/var/log/containers/kube-*.log", "/var/log/containers/kibana-*.log", "/var/log/containers/prometheus-*.log"]
+ path /var/log/containers/*ela-container-*.log,/var/log/containers/*build-step-*.log,/var/log/containers/*ela-controller-*.log,/var/log/containers/*ela-webhook-*.log,/var/log/containers/*autoscaler-*.log,/var/log/containers/*queue-proxy-*.log,/var/log/containers/*ela-activator-*.log
pos_file /var/log/es-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%NZ
tag raw.kubernetes.*
@@ -52,7 +51,6 @@ data:
@type kubernetes_metadata
300.forward.input.conf: |-
- # Takes the messages sent over TCP
@type forward
diff --git a/config/webhook.yaml b/config/webhook.yaml
index 2feae0249585..f5f34b3bac44 100644
--- a/config/webhook.yaml
+++ b/config/webhook.yaml
@@ -32,9 +32,9 @@ spec:
# and substituted here.
image: github.com/knative/serving/cmd/webhook
volumeMounts:
- - name: ela-webhook-config
- mountPath: /etc/config
+ - name: config-logging
+ mountPath: /etc/config-logging
volumes:
- - name: ela-webhook-config
+ - name: config-logging
configMap:
- name: ela-webhook-config
+ name: config-logging
diff --git a/install/CONFIG.md b/install/CONFIG.md
index 201bc8a82f56..bca4cbb11fed 100644
--- a/install/CONFIG.md
+++ b/install/CONFIG.md
@@ -3,7 +3,7 @@
## Serving multiple domains:
Different domain suffixes can be configured based on the route labels. In order
-to do this, update the config map named `ela-config` in the namespace
+to do this, update the config map named `config-domain` in the namespace
`ela-system`.
In that config map, each entry maps a domain name to an equality-based label
@@ -17,7 +17,7 @@ For example, if your config map looks like
apiVersion: v1
kind: ConfigMap
metadata:
- name: ela-config
+ name: config-domain
namespace: ela-system
data:
prod.domain.com: |
diff --git a/pkg/autoscaler/BUILD.bazel b/pkg/autoscaler/BUILD.bazel
index 8600e3d282c3..5bff57940cba 100644
--- a/pkg/autoscaler/BUILD.bazel
+++ b/pkg/autoscaler/BUILD.bazel
@@ -10,7 +10,7 @@ go_library(
importpath = "github.com/knative/serving/pkg/autoscaler",
visibility = ["//visibility:public"],
deps = [
- "//vendor/github.com/golang/glog:go_default_library",
+ "//pkg/logging:go_default_library",
"//vendor/github.com/josephburnett/k8sflag/pkg/k8sflag:go_default_library",
"//vendor/go.opencensus.io/stats:go_default_library",
"//vendor/go.opencensus.io/stats/view:go_default_library",
@@ -26,8 +26,10 @@ go_test(
],
embed = [":go_default_library"],
deps = [
+ "//pkg/logging:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/josephburnett/k8sflag/pkg/k8sflag:go_default_library",
"//vendor/go.opencensus.io/stats/view:go_default_library",
+ "//vendor/go.uber.org/zap:go_default_library",
],
)
diff --git a/pkg/autoscaler/autoscaler.go b/pkg/autoscaler/autoscaler.go
index ef8abf3b431c..2b5d935336ec 100644
--- a/pkg/autoscaler/autoscaler.go
+++ b/pkg/autoscaler/autoscaler.go
@@ -16,11 +16,12 @@ limitations under the License.
package autoscaler
import (
+ "context"
"math"
"time"
- "github.com/golang/glog"
"github.com/josephburnett/k8sflag/pkg/k8sflag"
+ "github.com/knative/serving/pkg/logging"
)
// Stat defines a single measurement at a point in time
@@ -77,9 +78,10 @@ func NewAutoscaler(config Config, reporter StatsReporter) *Autoscaler {
}
// Record a data point. Not safe for concurrent access or concurrent access with Scale.
-func (a *Autoscaler) Record(stat Stat) {
+func (a *Autoscaler) Record(ctx context.Context, stat Stat) {
if stat.Time == nil {
- glog.Errorf("Missing time from stat: %+v", stat)
+ logger := logging.FromContext(ctx)
+ logger.Errorf("Missing time from stat: %+v", stat)
return
}
key := statKey{
@@ -91,8 +93,8 @@ func (a *Autoscaler) Record(stat Stat) {
// Scale calculates the desired scale based on current statistics given the current time.
// Not safe for concurrent access or concurrent access with Record.
-func (a *Autoscaler) Scale(now time.Time) (int32, bool) {
-
+func (a *Autoscaler) Scale(ctx context.Context, now time.Time) (int32, bool) {
+ logger := logging.FromContext(ctx)
// 60 second window
stableTotal := float64(0)
stableCount := float64(0)
@@ -134,7 +136,6 @@ func (a *Autoscaler) Scale(now time.Time) (int32, bool) {
}
if lastRequestTime.Add(*a.ScaleToZeroThreshold.Get()).Before(now) {
- glog.Info("Threshold passed with no new requests. Scaling to 0.")
return 0, true
}
@@ -145,11 +146,11 @@ func (a *Autoscaler) Scale(now time.Time) (int32, bool) {
totalCurrentQPS = totalCurrentQPS + stat.RequestCount
totalCurrentConcurrency = totalCurrentConcurrency + stat.AverageConcurrentRequests
}
- glog.Infof("Current QPS: %v Current concurrent clients: %v", totalCurrentQPS, totalCurrentConcurrency)
+ logger.Debugf("Current QPS: %v Current concurrent clients: %v", totalCurrentQPS, totalCurrentConcurrency)
// Stop panicking after the surge has made its way into the stable metric.
if a.panicking && a.panicTime.Add(*a.StableWindow.Get()).Before(now) {
- glog.Info("Un-panicking.")
+ logger.Info("Un-panicking.")
a.reporter.Report(PanicM, 0)
a.panicking = false
a.panicTime = nil
@@ -158,7 +159,7 @@ func (a *Autoscaler) Scale(now time.Time) (int32, bool) {
// Do nothing when we have no data.
if len(stablePods) == 0 {
- glog.Info("No data to scale on.")
+ logger.Debug("No data to scale on.")
return 0, false
}
@@ -174,29 +175,29 @@ func (a *Autoscaler) Scale(now time.Time) (int32, bool) {
desiredStablePodCount := desiredStableScalingRatio * float64(len(stablePods))
desiredPanicPodCount := desiredPanicScalingRatio * float64(len(stablePods))
- glog.Infof("Observed average %0.3f concurrency over %v seconds over %v samples over %v pods.",
+ logger.Debugf("Observed average %0.3f concurrency over %v seconds over %v samples over %v pods.",
observedStableConcurrency, a.StableWindow.Get(), stableCount, len(stablePods))
- glog.Infof("Observed average %0.3f concurrency over %v seconds over %v samples over %v pods.",
+ logger.Debugf("Observed average %0.3f concurrency over %v seconds over %v samples over %v pods.",
observedPanicConcurrency, a.PanicWindow.Get(), panicCount, len(panicPods))
// Begin panicking when we cross the 6 second concurrency threshold.
if !a.panicking && len(panicPods) > 0 && observedPanicConcurrency >= (a.TargetConcurrency.Get()*2) {
- glog.Info("PANICKING")
+ logger.Info("PANICKING")
a.reporter.Report(PanicM, 1)
a.panicking = true
a.panicTime = &now
}
if a.panicking {
- glog.Info("Operating in panic mode.")
+ logger.Debug("Operating in panic mode.")
if desiredPanicPodCount > a.maxPanicPods {
- glog.Infof("Increasing pods from %v to %v.", len(panicPods), int(desiredPanicPodCount))
+ logger.Infof("Increasing pods from %v to %v.", len(panicPods), int(desiredPanicPodCount))
a.panicTime = &now
a.maxPanicPods = desiredPanicPodCount
}
return int32(math.Max(1.0, math.Ceil(a.maxPanicPods))), true
}
- glog.Info("Operating in stable mode.")
+ logger.Debug("Operating in stable mode.")
return int32(math.Max(1.0, math.Ceil(desiredStablePodCount))), true
}
diff --git a/pkg/autoscaler/autoscaler_test.go b/pkg/autoscaler/autoscaler_test.go
index 9f9511234d82..5d0b61869e7e 100644
--- a/pkg/autoscaler/autoscaler_test.go
+++ b/pkg/autoscaler/autoscaler_test.go
@@ -16,12 +16,21 @@ limitations under the License.
package autoscaler
import (
+ "context"
"fmt"
"testing"
"time"
+ "go.uber.org/zap"
+
"github.com/golang/glog"
"github.com/josephburnett/k8sflag/pkg/k8sflag"
+ "github.com/knative/serving/pkg/logging"
+)
+
+var (
+ testLogger = zap.NewNop().Sugar()
+ testCtx = logging.WithLogger(context.TODO(), testLogger)
)
func TestAutoscaler_NoData_NoAutoscale(t *testing.T) {
@@ -297,14 +306,14 @@ func (a *Autoscaler) recordLinearSeries(now time.Time, s linearSeries) time.Time
PodName: fmt.Sprintf("pod-%v", j),
AverageConcurrentRequests: float64(point),
}
- a.Record(stat)
+ a.Record(testCtx, stat)
}
}
return now
}
func (a *Autoscaler) expectScale(t *testing.T, now time.Time, expectScale int32, expectOk bool) {
- scale, ok := a.Scale(now)
+ scale, ok := a.Scale(testCtx, now)
if ok != expectOk {
t.Errorf("Unexpected autoscale decison. Expected %v. Got %v.", expectOk, ok)
}
diff --git a/pkg/client/clientset/versioned/BUILD.bazel b/pkg/client/clientset/versioned/BUILD.bazel
index 3bbf4cf97997..e702cb0c712f 100644
--- a/pkg/client/clientset/versioned/BUILD.bazel
+++ b/pkg/client/clientset/versioned/BUILD.bazel
@@ -11,7 +11,6 @@ go_library(
deps = [
"//pkg/client/clientset/versioned/typed/istio/v1alpha2:go_default_library",
"//pkg/client/clientset/versioned/typed/serving/v1alpha1:go_default_library",
- "//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go
index 7be974c10af2..c136a5cbb39d 100644
--- a/pkg/client/clientset/versioned/clientset.go
+++ b/pkg/client/clientset/versioned/clientset.go
@@ -16,7 +16,6 @@ limitations under the License.
package versioned
import (
- glog "github.com/golang/glog"
configv1alpha2 "github.com/knative/serving/pkg/client/clientset/versioned/typed/istio/v1alpha2"
servingv1alpha1 "github.com/knative/serving/pkg/client/clientset/versioned/typed/serving/v1alpha1"
discovery "k8s.io/client-go/discovery"
@@ -91,7 +90,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
- glog.Errorf("failed to create the DiscoveryClient: %v", err)
return nil, err
}
return &cs, nil
diff --git a/pkg/controller/config.go b/pkg/controller/config.go
index 1986821be045..725d75e26ef2 100644
--- a/pkg/controller/config.go
+++ b/pkg/controller/config.go
@@ -60,7 +60,7 @@ const (
)
func NewConfig(kubeClient kubernetes.Interface) (*Config, error) {
- m, err := kubeClient.CoreV1().ConfigMaps(elaNamespace).Get(GetElaConfigMapName(), metav1.GetOptions{})
+ m, err := kubeClient.CoreV1().ConfigMaps(elaNamespace).Get(GetDomainConfigMapName(), metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -68,12 +68,6 @@ func NewConfig(kubeClient kubernetes.Interface) (*Config, error) {
hasDefault := false
for k, v := range m.Data {
// TODO(josephburnett): migrate domain configuration to k8sflag
- if strings.HasPrefix(k, "autoscale.") {
- continue
- }
- if strings.HasPrefix(k, "logging.") {
- continue
- }
labelSelector := LabelSelector{}
err := yaml.Unmarshal([]byte(v), &labelSelector)
if err != nil {
diff --git a/pkg/controller/config_test.go b/pkg/controller/config_test.go
index c479bb7e99f3..df19a1a9f09f 100644
--- a/pkg/controller/config_test.go
+++ b/pkg/controller/config_test.go
@@ -67,7 +67,7 @@ func TestNewConfigNoEntry(t *testing.T) {
kubeClient.CoreV1().ConfigMaps(elaNamespace).Create(&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: elaNamespace,
- Name: GetElaConfigMapName(),
+ Name: GetDomainConfigMapName(),
},
})
_, err := NewConfig(kubeClient)
@@ -97,7 +97,7 @@ func TestNewConfig(t *testing.T) {
kubeClient.CoreV1().ConfigMaps(elaNamespace).Create(&corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: elaNamespace,
- Name: GetElaConfigMapName(),
+ Name: GetDomainConfigMapName(),
},
Data: map[string]string{
"test-domain.foo.com": "selector:\n app: foo",
diff --git a/pkg/controller/names.go b/pkg/controller/names.go
index 624b4bacfe37..d0597e4e7a0b 100644
--- a/pkg/controller/names.go
+++ b/pkg/controller/names.go
@@ -30,8 +30,8 @@ import (
clientset "k8s.io/client-go/kubernetes"
)
-func GetElaConfigMapName() string {
- return "ela-config"
+func GetDomainConfigMapName() string {
+ return "config-domain"
}
// Various functions for naming the resources for consistency
diff --git a/pkg/controller/revision/BUILD.bazel b/pkg/controller/revision/BUILD.bazel
index 00eef661ebfb..93345f4b1139 100644
--- a/pkg/controller/revision/BUILD.bazel
+++ b/pkg/controller/revision/BUILD.bazel
@@ -7,6 +7,7 @@ go_library(
"ela_autoscaler.go",
"ela_fluentd.go",
"ela_pod.go",
+ "ela_queue.go",
"ela_resolve.go",
"ela_resource.go",
"ela_service.go",
diff --git a/pkg/controller/revision/ela_autoscaler.go b/pkg/controller/revision/ela_autoscaler.go
index 18355a5c9f27..4cb53af73137 100644
--- a/pkg/controller/revision/ela_autoscaler.go
+++ b/pkg/controller/revision/ela_autoscaler.go
@@ -24,8 +24,8 @@ import (
"github.com/knative/serving/pkg/apis/serving/v1alpha1"
"github.com/knative/serving/pkg/controller"
- corev1 "k8s.io/api/core/v1"
appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -49,16 +49,30 @@ func MakeElaAutoscalerDeployment(rev *v1alpha1.Revision, autoscalerImage string)
replicas := int32(1)
- configVolume := corev1.Volume{
- Name: "ela-config",
+ const autoscalerConfigName = "config-autoscaler"
+ autoscalerConfigVolume := corev1.Volume{
+ Name: autoscalerConfigName,
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
- Name: "ela-config",
+ Name: autoscalerConfigName,
},
},
},
}
+
+ const loggingConfigName = "config-logging"
+ loggingConfigVolume := corev1.Volume{
+ Name: loggingConfigName,
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: loggingConfigName,
+ },
+ },
+ },
+ }
+
return &appsv1.Deployment{
ObjectMeta: meta_v1.ObjectMeta{
Name: controller.GetRevisionAutoscalerName(rev),
@@ -115,20 +129,22 @@ func MakeElaAutoscalerDeployment(rev *v1alpha1.Revision, autoscalerImage string)
},
},
Args: []string{
- "-logtostderr=true",
- "-stderrthreshold=INFO",
fmt.Sprintf("-concurrencyModel=%v", rev.Spec.ConcurrencyModel),
},
VolumeMounts: []corev1.VolumeMount{
corev1.VolumeMount{
- Name: "ela-config",
- MountPath: "/etc/config",
+ Name: autoscalerConfigName,
+ MountPath: "/etc/config-autoscaler",
+ },
+ corev1.VolumeMount{
+ Name: loggingConfigName,
+ MountPath: "/etc/config-logging",
},
},
},
},
ServiceAccountName: "ela-autoscaler",
- Volumes: []corev1.Volume{configVolume},
+ Volumes: []corev1.Volume{autoscalerConfigVolume, loggingConfigVolume},
},
},
},
diff --git a/pkg/controller/revision/ela_pod.go b/pkg/controller/revision/ela_pod.go
index 1587f6309b22..7744c6862c8e 100644
--- a/pkg/controller/revision/ela_pod.go
+++ b/pkg/controller/revision/ela_pod.go
@@ -17,9 +17,6 @@ limitations under the License.
package revision
import (
- "fmt"
- "strconv"
-
"github.com/knative/serving/pkg/apis/serving/v1alpha1"
"github.com/knative/serving/pkg/controller"
"github.com/knative/serving/pkg/queue"
@@ -109,83 +106,8 @@ func MakeElaPodSpec(
elaContainer.ReadinessProbe.Handler.HTTPGet.Port = intstr.FromInt(queue.RequestQueuePort)
}
- queueContainer := corev1.Container{
- Name: queueContainerName,
- Image: controllerConfig.QueueSidecarImage,
- Resources: corev1.ResourceRequirements{
- Requests: corev1.ResourceList{
- corev1.ResourceName("cpu"): resource.MustParse(queueContainerCPU),
- },
- },
- Ports: []corev1.ContainerPort{
- {
- Name: queue.RequestQueuePortName,
- ContainerPort: int32(queue.RequestQueuePort),
- },
- // Provides health checks and lifecycle hooks.
- {
- Name: queue.RequestQueueAdminPortName,
- ContainerPort: int32(queue.RequestQueueAdminPort),
- },
- },
- // This handler (1) marks the service as not ready and (2)
- // adds a small delay before the container is killed.
- Lifecycle: &corev1.Lifecycle{
- PreStop: &corev1.Handler{
- HTTPGet: &corev1.HTTPGetAction{
- Port: intstr.FromInt(queue.RequestQueueAdminPort),
- Path: queue.RequestQueueQuitPath,
- },
- },
- },
- ReadinessProbe: &corev1.Probe{
- Handler: corev1.Handler{
- HTTPGet: &corev1.HTTPGetAction{
- Port: intstr.FromInt(queue.RequestQueueAdminPort),
- Path: queue.RequestQueueHealthPath,
- },
- },
- // We want to mark the service as not ready as soon as the
- // PreStop handler is called, so we need to check a little
- // bit more often than the default. It is a small
- // sacrifice for a low rate of 503s.
- PeriodSeconds: 1,
- },
- Args: []string{
- "-logtostderr=true",
- "-stderrthreshold=INFO",
- fmt.Sprintf("-concurrencyQuantumOfTime=%v", controllerConfig.AutoscaleConcurrencyQuantumOfTime.Get()),
- },
- Env: []corev1.EnvVar{
- {
- Name: "ELA_NAMESPACE",
- Value: rev.Namespace,
- },
- {
- Name: "ELA_REVISION",
- Value: rev.Name,
- },
- {
- Name: "ELA_AUTOSCALER",
- Value: controller.GetRevisionAutoscalerName(rev),
- },
- {
- Name: "ELA_AUTOSCALER_PORT",
- Value: strconv.Itoa(autoscalerPort),
- },
- {
- Name: "ELA_POD",
- ValueFrom: &corev1.EnvVarSource{
- FieldRef: &corev1.ObjectFieldSelector{
- FieldPath: "metadata.name",
- },
- },
- },
- },
- }
-
podSpe := &corev1.PodSpec{
- Containers: []corev1.Container{*elaContainer, queueContainer},
+ Containers: []corev1.Container{*elaContainer, *MakeElaQueueContainer(rev, controllerConfig)},
Volumes: []corev1.Volume{varLogVolume},
ServiceAccountName: rev.Spec.ServiceAccountName,
}
diff --git a/pkg/controller/revision/ela_queue.go b/pkg/controller/revision/ela_queue.go
new file mode 100644
index 000000000000..a8f85b543cfb
--- /dev/null
+++ b/pkg/controller/revision/ela_queue.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package revision
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/knative/serving/pkg/apis/serving/v1alpha1"
+ "github.com/knative/serving/pkg/controller"
+ "github.com/knative/serving/pkg/queue"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// MakeElaQueueContainer creates the container spec for queue sidecar.
+func MakeElaQueueContainer(rev *v1alpha1.Revision, controllerConfig *ControllerConfig) *corev1.Container {
+ const elaQueueConfigVolumeName = "ela-queue-config"
+ return &corev1.Container{
+ Name: queueContainerName,
+ Image: controllerConfig.QueueSidecarImage,
+ Resources: corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceName("cpu"): resource.MustParse(queueContainerCPU),
+ },
+ },
+ Ports: []corev1.ContainerPort{
+ {
+ Name: queue.RequestQueuePortName,
+ ContainerPort: int32(queue.RequestQueuePort),
+ },
+ // Provides health checks and lifecycle hooks.
+ {
+ Name: queue.RequestQueueAdminPortName,
+ ContainerPort: int32(queue.RequestQueueAdminPort),
+ },
+ },
+ // This handler (1) marks the service as not ready and (2)
+ // adds a small delay before the container is killed.
+ Lifecycle: &corev1.Lifecycle{
+ PreStop: &corev1.Handler{
+ HTTPGet: &corev1.HTTPGetAction{
+ Port: intstr.FromInt(queue.RequestQueueAdminPort),
+ Path: queue.RequestQueueQuitPath,
+ },
+ },
+ },
+ ReadinessProbe: &corev1.Probe{
+ Handler: corev1.Handler{
+ HTTPGet: &corev1.HTTPGetAction{
+ Port: intstr.FromInt(queue.RequestQueueAdminPort),
+ Path: queue.RequestQueueHealthPath,
+ },
+ },
+ // We want to mark the service as not ready as soon as the
+ // PreStop handler is called, so we need to check a little
+ // bit more often than the default. It is a small
+ // sacrifice for a low rate of 503s.
+ PeriodSeconds: 1,
+ },
+ Args: []string{
+ fmt.Sprintf("-concurrencyQuantumOfTime=%v", controllerConfig.AutoscaleConcurrencyQuantumOfTime.Get()),
+ },
+ Env: []corev1.EnvVar{
+ {
+ Name: "ELA_NAMESPACE",
+ Value: rev.Namespace,
+ },
+ {
+ Name: "ELA_CONFIGURATION",
+ Value: controller.LookupOwningConfigurationName(rev.OwnerReferences),
+ },
+ {
+ Name: "ELA_REVISION",
+ Value: rev.Name,
+ },
+ {
+ Name: "ELA_AUTOSCALER",
+ Value: controller.GetRevisionAutoscalerName(rev),
+ },
+ {
+ Name: "ELA_AUTOSCALER_PORT",
+ Value: strconv.Itoa(autoscalerPort),
+ },
+ {
+ Name: "ELA_POD",
+ ValueFrom: &corev1.EnvVarSource{
+ FieldRef: &corev1.ObjectFieldSelector{
+ FieldPath: "metadata.name",
+ },
+ },
+ },
+ {
+ Name: "ELA_LOGGING_CONFIG",
+ Value: controllerConfig.QueueProxyLoggingConfig,
+ },
+ {
+ Name: "ELA_LOGGING_LEVEL",
+ Value: controllerConfig.QueueProxyLoggingLevel,
+ },
+ },
+ }
+}
diff --git a/pkg/controller/revision/revision.go b/pkg/controller/revision/revision.go
index e05b8f022653..f8c1962c7cde 100644
--- a/pkg/controller/revision/revision.go
+++ b/pkg/controller/revision/revision.go
@@ -112,7 +112,7 @@ type Controller struct {
type ControllerConfig struct {
// Autoscale part
- // see (elaconfig.yaml)
+ // see (config-autoscaler.yaml)
AutoscaleConcurrencyQuantumOfTime *k8sflag.DurationFlag
AutoscaleEnableSingleConcurrency *k8sflag.BoolFlag
@@ -141,6 +141,12 @@ type ControllerConfig struct {
// LoggingURLTemplate is a string containing the logging url template where
// the variable REVISION_UID will be replaced with the created revision's UID.
LoggingURLTemplate string
+
+ // QueueProxyLoggingConfig is a string containing the logger configuration for queue proxy.
+ QueueProxyLoggingConfig string
+
+ // QueueProxyLoggingLevel is a string containing the logger level for queue proxy.
+ QueueProxyLoggingLevel string
}
// NewController initializes the controller and is called by the generated code
diff --git a/pkg/controller/revision/revision_test.go b/pkg/controller/revision/revision_test.go
index 0a716d46574c..fbb9b89d779c 100644
--- a/pkg/controller/revision/revision_test.go
+++ b/pkg/controller/revision/revision_test.go
@@ -20,6 +20,7 @@ package revision
- When a Revision is updated TODO
- When a Revision is deleted TODO
*/
+
import (
"errors"
"fmt"
@@ -200,11 +201,14 @@ func getTestControllerConfig() ControllerConfig {
return ControllerConfig{
QueueSidecarImage: testQueueImage,
AutoscalerImage: testAutoscalerImage,
- AutoscaleConcurrencyQuantumOfTime: k8sflag.Duration("autoscale.concurrency-quantum-of-time", &autoscaleConcurrencyQuantumOfTime),
+ AutoscaleConcurrencyQuantumOfTime: k8sflag.Duration("concurrency-quantum-of-time", &autoscaleConcurrencyQuantumOfTime),
EnableVarLogCollection: true,
FluentdSidecarImage: testFluentdImage,
FluentdSidecarOutputConfig: testFluentdSidecarOutputConfig,
+
+ QueueProxyLoggingConfig: "{\"level\": \"error\",\n\"outputPaths\": [\"stdout\"],\n\"errorOutputPaths\": [\"stderr\"],\n\"encoding\": \"json\"}",
+ QueueProxyLoggingLevel: "info",
}
}
@@ -320,7 +324,8 @@ func (r *fixedResolver) Resolve(deploy *appsv1.Deployment) error {
}
func TestCreateRevCreatesStuff(t *testing.T) {
- kubeClient, _, elaClient, controller, _, elaInformer := newTestController(t)
+ controllerConfig := getTestControllerConfig()
+ kubeClient, _, elaClient, controller, _, elaInformer := newTestControllerWithConfig(t, &controllerConfig)
// Resolve image references to this "digest"
digest := "foo@sha256:deadbeef"
@@ -390,8 +395,13 @@ func TestCreateRevCreatesStuff(t *testing.T) {
if container.Name == "queue-proxy" {
foundQueueProxy = true
checkEnv(container.Env, "ELA_NAMESPACE", testNamespace, "")
- checkEnv(container.Env, "ELA_REVISION", "test-rev", "")
+ checkEnv(container.Env, "ELA_CONFIGURATION", config.Name, "")
+ checkEnv(container.Env, "ELA_REVISION", rev.Name, "")
checkEnv(container.Env, "ELA_POD", "", "metadata.name")
+ checkEnv(container.Env, "ELA_AUTOSCALER", ctrl.GetRevisionAutoscalerName(rev), "")
+ checkEnv(container.Env, "ELA_AUTOSCALER_PORT", strconv.Itoa(autoscalerPort), "")
+ checkEnv(container.Env, "ELA_LOGGING_CONFIG", controllerConfig.QueueProxyLoggingConfig, "")
+ checkEnv(container.Env, "ELA_LOGGING_LEVEL", controllerConfig.QueueProxyLoggingLevel, "")
if diff := cmp.Diff(expectedPreStop, container.Lifecycle.PreStop); diff != "" {
t.Errorf("Unexpected PreStop diff in container %q (-want +got): %v", container.Name, diff)
}
@@ -410,8 +420,8 @@ func TestCreateRevCreatesStuff(t *testing.T) {
if container.Name == "fluentd-proxy" {
foundFluentdProxy = true
checkEnv(container.Env, "ELA_NAMESPACE", testNamespace, "")
- checkEnv(container.Env, "ELA_REVISION", "test-rev", "")
- checkEnv(container.Env, "ELA_CONFIGURATION", "test-config", "")
+ checkEnv(container.Env, "ELA_REVISION", rev.Name, "")
+ checkEnv(container.Env, "ELA_CONFIGURATION", config.Name, "")
checkEnv(container.Env, "ELA_CONTAINER_NAME", "ela-container", "")
checkEnv(container.Env, "ELA_POD_NAME", "", "metadata.name")
}
@@ -517,6 +527,16 @@ func TestCreateRevCreatesStuff(t *testing.T) {
checkEnv(container.Env, "ELA_CONFIGURATION", config.Name, "")
checkEnv(container.Env, "ELA_REVISION", rev.Name, "")
checkEnv(container.Env, "ELA_AUTOSCALER_PORT", strconv.Itoa(autoscalerPort), "")
+ if got, want := len(container.VolumeMounts), 2; got != want {
+ t.Errorf("Unexpected number of volume mounts: got: %v, want: %v", got, want)
+ } else {
+ if got, want := container.VolumeMounts[0].MountPath, "/etc/config-autoscaler"; got != want {
+ t.Errorf("Unexpected volume mount path: got: %v, want: %v", got, want)
+ }
+ if got, want := container.VolumeMounts[1].MountPath, "/etc/config-logging"; got != want {
+ t.Errorf("Unexpected volume mount path: got: %v, want: %v", got, want)
+ }
+ }
break
}
}
@@ -524,6 +544,18 @@ func TestCreateRevCreatesStuff(t *testing.T) {
t.Error("Missing autoscaler")
}
+ // Validate the config volumes for auto scaler
+ if got, want := len(asDeployment.Spec.Template.Spec.Volumes), 2; got != want {
+ t.Errorf("Unexpected number of volumes: got: %v, want: %v", got, want)
+ } else {
+ if got, want := asDeployment.Spec.Template.Spec.Volumes[0].ConfigMap.LocalObjectReference.Name, "config-autoscaler"; got != want {
+ t.Errorf("Unexpected configmap reference: got: %v, want: %v", got, want)
+ }
+ if got, want := asDeployment.Spec.Template.Spec.Volumes[1].ConfigMap.LocalObjectReference.Name, "config-logging"; got != want {
+ t.Errorf("Unexpected configmap reference: got: %v, want: %v", got, want)
+ }
+ }
+
// Look for the autoscaler service.
asService, err := kubeClient.CoreV1().Services(AutoscalerNamespace).Get(expectedAutoscalerName, metav1.GetOptions{})
if err != nil {
diff --git a/pkg/controller/route/route.go b/pkg/controller/route/route.go
index 76e09fc3d02a..fe0c0d439a4e 100644
--- a/pkg/controller/route/route.go
+++ b/pkg/controller/route/route.go
@@ -900,7 +900,7 @@ func (c *Controller) addConfigurationEvent(obj interface{}) {
// Don't modify the informers copy
route = route.DeepCopy()
if _, err := c.syncTrafficTargetsAndUpdateRouteStatus(ctx, route); err != nil {
- logger.Errorf("Error updating route upon configuration becoming ready", zap.Error(err))
+ logger.Error("Error updating route upon configuration becoming ready", zap.Error(err))
}
}
diff --git a/pkg/controller/route/route_test.go b/pkg/controller/route/route_test.go
index 7341e1bf5974..2bc6752ead76 100644
--- a/pkg/controller/route/route_test.go
+++ b/pkg/controller/route/route_test.go
@@ -193,7 +193,7 @@ func newTestController(t *testing.T, elaObjects ...runtime.Object) (
},
},
},
- k8sflag.Bool("autoscaler.enable-scale-to-zero", false),
+ k8sflag.Bool("enable-scale-to-zero", false),
testLogger,
).(*Controller)
@@ -354,7 +354,7 @@ func TestCreateRouteCreatesStuff(t *testing.T) {
// Test the only revision in the route is in Reserve (inactive) serving status.
func TestCreateRouteForOneReserveRevision(t *testing.T) {
kubeClient, elaClient, controller, _, elaInformer := newTestController(t)
- controller.enableScaleToZero = k8sflag.Bool("autoscaler.enable-scale-to-zero", true)
+ controller.enableScaleToZero = k8sflag.Bool("enable-scale-to-zero", true)
h := NewHooks()
// Look for the events. Events are delivered asynchronously so we need to use
@@ -617,7 +617,7 @@ func TestCreateRouteWithMultipleTargets(t *testing.T) {
// Test one out of multiple target revisions is in Reserve serving state.
func TestCreateRouteWithOneTargetReserve(t *testing.T) {
_, elaClient, controller, _, elaInformer := newTestController(t)
- controller.enableScaleToZero = k8sflag.Bool("autoscaler.enable-scale-to-zero", true)
+ controller.enableScaleToZero = k8sflag.Bool("enable-scale-to-zero", true)
// A standalone inactive revision
rev := getTestRevisionWithCondition("test-rev",
diff --git a/pkg/logging/BUILD.bazel b/pkg/logging/BUILD.bazel
index 5e0b28a6916d..e1d2f24c2b77 100644
--- a/pkg/logging/BUILD.bazel
+++ b/pkg/logging/BUILD.bazel
@@ -10,7 +10,9 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/logging/logkey:go_default_library",
+ "//vendor/github.com/josephburnett/k8sflag/pkg/k8sflag:go_default_library",
"//vendor/go.uber.org/zap:go_default_library",
+ "//vendor/go.uber.org/zap/zapcore:go_default_library",
],
)
diff --git a/pkg/logging/config.go b/pkg/logging/config.go
index 683097f37370..cad0291bf19a 100644
--- a/pkg/logging/config.go
+++ b/pkg/logging/config.go
@@ -21,17 +21,18 @@ import (
"errors"
"github.com/knative/serving/pkg/logging/logkey"
+ "github.com/josephburnett/k8sflag/pkg/k8sflag"
"go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
)
// NewLogger creates a logger with the supplied configuration.
// If configuration is empty, a fallback configuration is used.
// If configuration cannot be used to instantiate a logger,
// the same fallback configuration is used.
-func NewLogger(configJSON string) *zap.SugaredLogger {
- logger, err := newLoggerFromConfig(configJSON)
+func NewLogger(configJSON string, levelOverride string) *zap.SugaredLogger {
+ logger, err := newLoggerFromConfig(configJSON, levelOverride)
if err == nil {
- logger.Info("Successfully created the logger.", zap.String(logkey.JSONConfig, configJSON))
return logger.Sugar()
}
@@ -44,7 +45,15 @@ func NewLogger(configJSON string) *zap.SugaredLogger {
return logger.Sugar()
}
-func newLoggerFromConfig(configJSON string) (*zap.Logger, error) {
+// NewLoggerFromDefaultConfigMap creates a logger using the configuration within the /etc/config-logging file.
+func NewLoggerFromDefaultConfigMap(logLevelKey string) *zap.SugaredLogger {
+ loggingFlagSet := k8sflag.NewFlagSet("/etc/config-logging")
+ zapCfg := loggingFlagSet.String("zap-logger-config", "")
+ zapLevelOverride := loggingFlagSet.String(logLevelKey, "")
+ return NewLogger(zapCfg.Get(), zapLevelOverride.Get())
+}
+
+func newLoggerFromConfig(configJSON string, levelOverride string) (*zap.Logger, error) {
if len(configJSON) == 0 {
return nil, errors.New("empty logging configuration")
}
@@ -54,5 +63,19 @@ func newLoggerFromConfig(configJSON string) (*zap.Logger, error) {
return nil, err
}
- return loggingCfg.Build()
+ if len(levelOverride) > 0 {
+ var level zapcore.Level
+ if err := level.Set(levelOverride); err == nil {
+ loggingCfg.Level = zap.NewAtomicLevelAt(level)
+ }
+ }
+
+ logger, err := loggingCfg.Build()
+ if err != nil {
+ return nil, err
+ }
+
+ logger.Info("Successfully created the logger.", zap.String(logkey.JSONConfig, configJSON))
+ logger.Sugar().Infof("Logging level set to %v", loggingCfg.Level)
+ return logger, nil
}
diff --git a/pkg/logging/config_test.go b/pkg/logging/config_test.go
index 7b28b58f04e6..b6ff544573d6 100644
--- a/pkg/logging/config_test.go
+++ b/pkg/logging/config_test.go
@@ -23,41 +23,61 @@ import (
)
func TestNewLogger(t *testing.T) {
- logger := NewLogger("")
+ logger := NewLogger("", "")
if logger == nil {
t.Error("expected a non-nil logger")
}
- logger = NewLogger("some invalid JSON here")
+ logger = NewLogger("some invalid JSON here", "")
if logger == nil {
t.Error("expected a non-nil logger")
}
// No good way to test if all the config is applied,
// but at the minimum, we can check and see if level is getting applied.
- logger = NewLogger("{\"level\": \"error\", \"outputPaths\": [\"stdout\"],\"errorOutputPaths\": [\"stderr\"],\"encoding\": \"json\"}")
+ logger = NewLogger("{\"level\": \"error\", \"outputPaths\": [\"stdout\"],\"errorOutputPaths\": [\"stderr\"],\"encoding\": \"json\"}", "")
if logger == nil {
t.Error("expected a non-nil logger")
}
-
if ce := logger.Desugar().Check(zap.InfoLevel, "test"); ce != nil {
t.Error("not expected to get info logs from the logger configured with error as min threshold")
}
-
if ce := logger.Desugar().Check(zap.ErrorLevel, "test"); ce == nil {
t.Error("expected to get error logs from the logger configured with error as min threshold")
}
- logger = NewLogger("{\"level\": \"info\", \"outputPaths\": [\"stdout\"],\"errorOutputPaths\": [\"stderr\"],\"encoding\": \"json\"}")
+ logger = NewLogger("{\"level\": \"info\", \"outputPaths\": [\"stdout\"],\"errorOutputPaths\": [\"stderr\"],\"encoding\": \"json\"}", "")
if logger == nil {
t.Error("expected a non-nil logger")
}
-
if ce := logger.Desugar().Check(zap.DebugLevel, "test"); ce != nil {
t.Error("not expected to get debug logs from the logger configured with info as min threshold")
}
+ if ce := logger.Desugar().Check(zap.InfoLevel, "test"); ce == nil {
+ t.Error("expected to get info logs from the logger configured with info as min threshold")
+ }
+ // Test logging override
+ logger = NewLogger("{\"level\": \"error\", \"outputPaths\": [\"stdout\"],\"errorOutputPaths\": [\"stderr\"],\"encoding\": \"json\"}", "info")
+ if logger == nil {
+ t.Error("expected a non-nil logger")
+ }
+ if ce := logger.Desugar().Check(zap.DebugLevel, "test"); ce != nil {
+ t.Error("not expected to get debug logs from the logger configured with info as min threshold")
+ }
if ce := logger.Desugar().Check(zap.InfoLevel, "test"); ce == nil {
t.Error("expected to get info logs from the logger configured with info as min threshold")
}
+
+ // Invalid logging override
+ logger = NewLogger("{\"level\": \"error\", \"outputPaths\": [\"stdout\"],\"errorOutputPaths\": [\"stderr\"],\"encoding\": \"json\"}", "randomstring")
+ if logger == nil {
+ t.Error("expected a non-nil logger")
+ }
+ if ce := logger.Desugar().Check(zap.InfoLevel, "test"); ce != nil {
+ t.Error("not expected to get info logs from the logger configured with error as min threshold")
+ }
+ if ce := logger.Desugar().Check(zap.ErrorLevel, "test"); ce == nil {
+ t.Error("expected to get error logs from the logger configured with error as min threshold")
+ }
}
diff --git a/pkg/logging/logkey/constants.go b/pkg/logging/logkey/constants.go
index 2935baac1f1f..3530ac6c3edc 100644
--- a/pkg/logging/logkey/constants.go
+++ b/pkg/logging/logkey/constants.go
@@ -58,4 +58,7 @@ const (
// UserInfo is the key used to represent a user information in logs
UserInfo = "knative.dev/userinfo"
+
+ // Pod is the key used to represent a pod's name in logs
+ Pod = "knative.dev/pod"
)