28 changes: 8 additions & 20 deletions DEVELOPMENT.md
@@ -83,7 +83,8 @@ To check out this repository:

1. Create your own [fork of this
repo](https://help.github.com/articles/fork-a-repo/)
2. Clone it to your machine:
1. Clone it to your machine:

```shell
mkdir -p ${GOPATH}/src/github.com/knative
cd ${GOPATH}/src/github.com/knative
@@ -156,6 +157,7 @@ ko apply -f config/
```

You can see things running with:

```shell
kubectl -n knative-serving get pods
NAME READY STATUS RESTARTS AGE
@@ -173,35 +175,19 @@ If you're using a GCP project to host your Kubernetes cluster, it's good to check the
[Discovery & load balancing](http://console.developers.google.com/kubernetes/discovery)
page to ensure that all services are up and running (and not blocked by a quota issue, for example).

### Enable log and metric collection
### Install logging and monitoring backends

You can use two different setups for collecting logs (to Elasticsearch & Kibana) and metrics
(see [Logs and Metrics](./docs/telemetry.md) for setting up other logging backends):

1. **150-elasticsearch-prod**: This configuration collects logs & metrics from user containers, build controller and Istio requests.
Run:

```shell
kubectl apply -R -f config/monitoring/100-common \
-f config/monitoring/150-elasticsearch-prod \
-f config/monitoring/150-elasticsearch \
-f third_party/config/monitoring/common \
-f third_party/config/monitoring/elasticsearch \
-f config/monitoring/200-common \
-f config/monitoring/200-common/100-istio.yaml
```

1. **150-elasticsearch-dev**: This configuration collects everything in (1) plus Knative Serving controller logs.

```shell
kubectl apply -R -f config/monitoring/100-common \
-f config/monitoring/150-elasticsearch-dev \
-f third_party/config/monitoring/common \
-f third_party/config/monitoring/elasticsearch \
-f config/monitoring/200-common \
-f config/monitoring/200-common/100-istio.yaml
```

Once complete, follow the instructions at [Logs and Metrics](./docs/telemetry.md).

## Iterating

As you make changes to the code-base, there are two special cases to be aware of:
@@ -213,6 +199,7 @@ As you make changes to the code-base, there are two special cases to be aware of
These are both idempotent, and we expect running these at `HEAD` to produce no diffs.

Once the codegen and dependency information is correct, redeploying the controller is simply:

```shell
ko apply -f config/controller.yaml
```
@@ -223,6 +210,7 @@ redeploy `Knative Serving`](./README.md#start-knative).
## Clean up

You can delete all of the service components with:

```shell
ko delete --ignore-not-found=true \
-f config/monitoring/100-common \
17 changes: 14 additions & 3 deletions cmd/activator/main.go
@@ -28,13 +28,16 @@ import (
"net/url"
"time"

"github.com/knative/serving/pkg/logging/logkey"

"github.com/knative/serving/pkg/activator"
clientset "github.com/knative/serving/pkg/client/clientset/versioned"
"github.com/knative/serving/pkg/configmap"
"github.com/knative/serving/pkg/controller"
h2cutil "github.com/knative/serving/pkg/h2c"
"github.com/knative/serving/pkg/logging"
"github.com/knative/serving/pkg/signals"
"github.com/knative/serving/pkg/system"
"github.com/knative/serving/third_party/h2c"
"go.uber.org/zap"
"k8s.io/client-go/kubernetes"
@@ -45,6 +48,7 @@ const (
maxUploadBytes = 32e6 // 32MB - same as app engine
maxRetry = 60
retryInterval = 1 * time.Second
logLevelKey = "activator"
)

type activationHandler struct {
@@ -127,7 +131,7 @@ func (a *activationHandler) handler(w http.ResponseWriter, r *http.Request) {
endpoint, status, err := a.act.ActiveEndpoint(namespace, name)
if err != nil {
msg := fmt.Sprintf("Error getting active endpoint: %v", err)
a.logger.Errorf(msg)
a.logger.Error(msg)
http.Error(w, msg, int(status))
return
}
@@ -157,9 +161,9 @@ func main() {
if err != nil {
log.Fatalf("Error parsing logging configuration: %v", err)
}
logger, _ := logging.NewLoggerFromConfig(config, "activator")
logger, atomicLevel := logging.NewLoggerFromConfig(config, logLevelKey)
defer logger.Sync()

logger = logger.With(zap.String(logkey.ControllerType, "activator"))
logger.Info("Starting the knative activator")

clusterConfig, err := rest.InClusterConfig()
@@ -186,6 +190,13 @@ func main() {
a.Shutdown()
}()

// Watch the logging config map and dynamically update logging levels.
configMapWatcher := configmap.NewDefaultWatcher(kubeClient, system.Namespace)
configMapWatcher.Watch(logging.ConfigName, logging.UpdateLevelFromConfigMap(logger, atomicLevel, logLevelKey))
if err = configMapWatcher.Start(stopCh); err != nil {
logger.Fatalf("failed to start configuration manager: %v", err)
}

http.HandleFunc("/", ah.handler)
h2c.ListenAndServe(":8080", nil)
}
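
The `configMapWatcher` block above is the heart of this PR; the same wiring recurs, with a different `logLevelKey`, in the autoscaler, controller, and webhook below. The diff never shows the body of `logging.UpdateLevelFromConfigMap`, but the `receiveLoggingConfig` helper deleted from `cmd/controller/main.go` later in this PR strongly suggests it is that same logic generalized over a component key. A sketch under that assumption (the function name casing and log messages here are illustrative, not the actual implementation):

```go
// Sketch of what logging.UpdateLevelFromConfigMap presumably does, based on
// the receiveLoggingConfig helper this PR deletes from cmd/controller/main.go.
package main

import (
	"github.com/knative/serving/pkg/logging"
	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
)

func updateLevelFromConfigMap(logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel, levelKey string) func(*corev1.ConfigMap) {
	return func(configMap *corev1.ConfigMap) {
		loggingConfig, err := logging.NewConfigFromConfigMap(configMap)
		if err != nil {
			logger.Error("Failed to parse the logging configmap. Previous config map will be used.", zap.Error(err))
			return
		}
		// levelKey is "activator", "controller", etc., matching the
		// loglevel.<component> entries in config/config-logging.yaml.
		if level, ok := loggingConfig.LoggingLevel[levelKey]; ok {
			if atomicLevel.Level() != level {
				logger.Infof("Updating logging level from %v to %v.", atomicLevel.Level(), level)
				// SetLevel mutates the AtomicLevel shared with the live
				// zap logger, so the change takes effect without a restart.
				atomicLevel.SetLevel(level)
			}
		}
	}
}
```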
15 changes: 14 additions & 1 deletion cmd/autoscaler/main.go
@@ -36,6 +36,7 @@ import (
"github.com/knative/serving/pkg/configmap"
"github.com/knative/serving/pkg/logging"
"github.com/knative/serving/pkg/logging/logkey"
"github.com/knative/serving/pkg/system"

"github.com/gorilla/websocket"

@@ -54,6 +55,7 @@
// seconds while an http request is taking the full timeout of 5
// second.
scaleBufferSize = 10
logLevelKey = "autoscaler"
)

var (
@@ -70,6 +72,7 @@
servingAutoscalerPort string
currentScale int32
logger *zap.SugaredLogger
atomicLevel zap.AtomicLevel

// Revision-level configuration
concurrencyModel = flag.String("concurrencyModel", string(v1alpha1.RevisionRequestConcurrencyModelMulti), "")
@@ -227,11 +230,12 @@ func main() {
if err != nil {
log.Fatalf("Error parsing logging configuration: %v", err)
}
logger, _ = logging.NewLoggerFromConfig(logginConfig, "autoscaler")
logger, atomicLevel = logging.NewLoggerFromConfig(logginConfig, logLevelKey)
defer logger.Sync()

initEnv()
logger = logger.With(
zap.String(logkey.ControllerType, "autoscaler"),
zap.String(logkey.Namespace, servingNamespace),
zap.String(logkey.Configuration, servingConfig),
zap.String(logkey.Revision, servingRevision))
@@ -266,6 +270,15 @@ func main() {
}
statsReporter = reporter

// Watch the logging config map and dynamically update logging levels.
stopCh := make(chan struct{})
defer close(stopCh)
configMapWatcher := configmap.NewDefaultWatcher(kubeClient, system.Namespace)
configMapWatcher.Watch(logging.ConfigName, logging.UpdateLevelFromConfigMap(logger, atomicLevel, logLevelKey))
if err := configMapWatcher.Start(stopCh); err != nil {
logger.Fatalf("failed to start configuration manager: %v", err)
}

go runAutoscaler()
go scaleSerializer()

24 changes: 3 additions & 21 deletions cmd/controller/main.go
@@ -22,13 +22,11 @@ import (
"time"

"github.com/knative/serving/pkg/configmap"
"go.uber.org/zap"

"github.com/knative/serving/pkg/controller"
"github.com/knative/serving/pkg/logging"

"github.com/knative/serving/pkg/system"
corev1 "k8s.io/api/core/v1"

vpa "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
vpainformers "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions"
@@ -52,6 +50,7 @@

const (
threadsPerController = 2
logLevelKey = "controller"
)

var (
@@ -69,7 +68,7 @@ func main() {
if err != nil {
log.Fatalf("Error parsing logging configuration: %v", err)
}
logger, atomicLevel := logging.NewLoggerFromConfig(loggingConfig, "controller")
logger, atomicLevel := logging.NewLoggerFromConfig(loggingConfig, logLevelKey)
defer logger.Sync()

// set up signals so we handle the first shutdown signal gracefully
@@ -164,7 +163,7 @@ func main() {
}

// Watch the logging config map and dynamically update logging levels.
configMapWatcher.Watch(logging.ConfigName, receiveLoggingConfig(logger, atomicLevel))
configMapWatcher.Watch(logging.ConfigName, logging.UpdateLevelFromConfigMap(logger, atomicLevel, logLevelKey))

// These are non-blocking.
kubeInformerFactory.Start(stopCh)
Expand Down Expand Up @@ -213,20 +212,3 @@ func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
}

func receiveLoggingConfig(logger *zap.SugaredLogger, atomicLevel zap.AtomicLevel) func(configMap *corev1.ConfigMap) {
return func(configMap *corev1.ConfigMap) {
loggingConfig, err := logging.NewConfigFromConfigMap(configMap)
if err != nil {
logger.Error("Failed to parse the logging configmap. Previous config map will be used.", zap.Error(err))
return
}

if level, ok := loggingConfig.LoggingLevel["controller"]; ok {
if atomicLevel.Level() != level {
logger.Infof("Updating logging level from %v to %v.", atomicLevel.Level(), level)
atomicLevel.SetLevel(level)
}
}
}
}
63 changes: 0 additions & 63 deletions cmd/controller/main_test.go

This file was deleted.
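
Its coverage presumably moved to `pkg/logging` along with the extracted helper. A minimal test of the dynamic-update behavior, written against the sketch shown after the activator diff above, might look like this (hypothetical; the relocated tests are not shown in this diff, and it assumes `NewConfigFromConfigMap` tolerates a ConfigMap carrying only level overrides):

```go
// Hypothetical replacement test for the dynamic log-level callback; the
// real tests are not part of this diff.
package main

import (
	"testing"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	corev1 "k8s.io/api/core/v1"
)

func TestUpdateLevelFromConfigMap(t *testing.T) {
	atomicLevel := zap.NewAtomicLevelAt(zapcore.InfoLevel)
	logger := zap.NewNop().Sugar()
	update := updateLevelFromConfigMap(logger, atomicLevel, "controller")

	// An override in the ConfigMap should change the live level in place.
	update(&corev1.ConfigMap{Data: map[string]string{"loglevel.controller": "debug"}})
	if got := atomicLevel.Level(); got != zapcore.DebugLevel {
		t.Errorf("level after update = %v, want %v", got, zapcore.DebugLevel)
	}

	// An unparseable override should be rejected, keeping the previous level.
	update(&corev1.ConfigMap{Data: map[string]string{"loglevel.controller": "bogus"}})
	if got := atomicLevel.Level(); got != zapcore.DebugLevel {
		t.Errorf("level after bad update = %v, want %v (unchanged)", got, zapcore.DebugLevel)
	}
}
```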

19 changes: 16 additions & 3 deletions cmd/webhook/main.go
@@ -24,6 +24,7 @@ import (

"github.com/knative/serving/pkg/configmap"
"github.com/knative/serving/pkg/logging"
"github.com/knative/serving/pkg/logging/logkey"
"github.com/knative/serving/pkg/signals"
"github.com/knative/serving/pkg/system"
"github.com/knative/serving/pkg/webhook"
@@ -32,6 +33,10 @@
"k8s.io/client-go/rest"
)

const (
logLevelKey = "webhook"
)

func main() {
flag.Parse()
cm, err := configmap.Load("/etc/config-logging")
@@ -42,8 +47,9 @@
if err != nil {
log.Fatalf("Error parsing logging configuration: %v", err)
}
logger, _ := logging.NewLoggerFromConfig(config, "webhook")
logger, atomicLevel := logging.NewLoggerFromConfig(config, logLevelKey)
defer logger.Sync()
logger = logger.With(zap.String(logkey.ControllerType, "webhook"))

logger.Info("Starting the Configuration Webhook")

@@ -55,19 +61,26 @@
logger.Fatal("Failed to get in cluster config", zap.Error(err))
}

clientset, err := kubernetes.NewForConfig(clusterConfig)
kubeClient, err := kubernetes.NewForConfig(clusterConfig)
if err != nil {
logger.Fatal("Failed to get the client set", zap.Error(err))
}

// Watch the logging config map and dynamically update logging levels.
configMapWatcher := configmap.NewDefaultWatcher(kubeClient, system.Namespace)
configMapWatcher.Watch(logging.ConfigName, logging.UpdateLevelFromConfigMap(logger, atomicLevel, logLevelKey))
if err = configMapWatcher.Start(stopCh); err != nil {
logger.Fatalf("failed to start configuration manager: %v", err)
}

options := webhook.ControllerOptions{
ServiceName: "webhook",
ServiceNamespace: system.Namespace,
Port: 443,
SecretName: "webhook-certs",
WebhookName: "webhook.knative.dev",
}
controller, err := webhook.NewAdmissionController(clientset, options, logger)
controller, err := webhook.NewAdmissionController(kubeClient, options, logger)
if err != nil {
logger.Fatal("Failed to create the admission controller", zap.Error(err))
}
7 changes: 4 additions & 3 deletions config/activator.yaml
@@ -39,9 +39,10 @@ spec:
- name: http
containerPort: 8080
args:
- "-logtostderr=true"
- "-stderrthreshold=INFO"

# Disable glog writing into stderr. Our code doesn't use glog
# and seeing k8s logs in addition to ours is not useful.
- "-logtostderr=false"
- "-stderrthreshold=FATAL"
volumeMounts:
- name: config-logging
mountPath: /etc/config-logging
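
A side note on the flag change above: these binaries accept `-logtostderr` and `-stderrthreshold` only because the vendored Kubernetes client libraries log through glog, which registers its flags on the standard flag set at init time; Knative's own code logs through zap. A minimal illustration of that mechanism (not code from this repo):

```go
package main

import (
	"flag"

	// Imported for its side effect: glog's init() registers -logtostderr,
	// -stderrthreshold, -v, -vmodule, ... on the default flag set. This is
	// how client-go's logging becomes configurable from the command line.
	_ "github.com/golang/glog"
)

func main() {
	// With -logtostderr=false -stderrthreshold=FATAL, glog keeps library
	// logs out of stderr (they go to temp files instead), so only the zap
	// output from Knative's own code appears in the container logs.
	flag.Parse()
}
```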
3 changes: 3 additions & 0 deletions config/config-logging.yaml
@@ -42,6 +42,9 @@ data:
}

# Log level overrides
# For all components except the autoscaler and queue proxy,
# changes are picked up immediately.
# For autoscaler and queue proxy, changes require recreation of the pods.
loglevel.controller: "info"
loglevel.autoscaler: "info"
loglevel.queueproxy: "info"