From 0190820da403237780782bcf4c520c55cc262cff Mon Sep 17 00:00:00 2001 From: Matthias Wessendorf Date: Thu, 8 Nov 2018 15:49:30 +0100 Subject: [PATCH] Updating docs, based on feedback --- config/provisioners/kafka/README.md | 52 ++++++++++++++++--- config/provisioners/kafka/broker/README.md | 2 +- config/provisioners/kafka/strimzi/README.md | 31 ----------- .../kafka/strimzi/kafka-ephemeral.yaml | 23 -------- .../kafka/strimzi/kafka-persistent.yaml | 27 ---------- 5 files changed, 47 insertions(+), 88 deletions(-) delete mode 100644 config/provisioners/kafka/strimzi/README.md delete mode 100644 config/provisioners/kafka/strimzi/kafka-ephemeral.yaml delete mode 100644 config/provisioners/kafka/strimzi/kafka-persistent.yaml diff --git a/config/provisioners/kafka/README.md b/config/provisioners/kafka/README.md index cf9bb522541..b8c18cf31b7 100644 --- a/config/provisioners/kafka/README.md +++ b/config/provisioners/kafka/README.md @@ -2,15 +2,35 @@ Deployment steps: 1. Setup [Knative Eventing](../../../DEVELOPMENT.md) -1. Install an Apache Kafka cluster. There are two choices: +1. If not done already, install an Apache Kafka cluster. There are two choices: * Simple installation of [Apache Kafka](broker). * A production grade installation using the [Strimzi Kafka Operator](strimzi). + Installation [guides](http://strimzi.io/quickstarts/) are provided for + Kubernetes and OpenShift. -1. Now that the Apache Kafka is installed, apply the 'Kafka' ClusterChannelProvisioner: +1. Now that Apache Kafka is installed, you need to configure the +`bootstrap_servers` value in the `kafka-channel-controller-config` ConfigMap, +located inside the `config/provisioners/kafka/kafka-provisioner.yaml` file: + ``` + ... + apiVersion: v1 + kind: ConfigMap + metadata: + name: kafka-channel-controller-config + namespace: knative-eventing + data: + # Broker URL's for the provisioner + bootstrap_servers: kafkabroker.kafka:9092 + ... 
+ ``` + > Note: The `bootstrap_servers` needs to contain the address of at least + one broker of your Apache Kafka cluster. If you are using Strimzi, you need + to update the `bootstrap_servers` value to + `my-cluster-kafka-bootstrap.mynamespace:9092`. +1. Apply the 'Kafka' ClusterChannelProvisioner, Controller, and Dispatcher: ``` ko apply -f config/provisioners/kafka/kafka-provisioner.yaml ``` - > Note: If you are using Strimzi, you need to update the `KAFKA_BOOTSTRAP_SERVERS` value in the `kafka-channel-controller-config` ConfigMap to `my-cluster-kafka-bootstrap.kafka.9092`. 1. Create Channels that reference the 'kafka-channel'. ```yaml @@ -24,15 +44,35 @@ Deployment steps: kind: ClusterChannelProvisioner name: kafka-channel ``` -1. (Optional) Install [Kail](https://github.com/boz/kail) - Kubernetes tail ## Components The major components are: * ClusterChannelProvisioner Controller -* Channel Controller Config Map +* Channel Controller +* Channel Controller Config Map +* Channel Dispatcher +* Channel Dispatcher Config Map -The ClusterChannelProvisioner Controller and the Channel Controller are colocated in one Pod. 
+The ClusterChannelProvisioner Controller and the Channel Controller are colocated
+in one Pod: ```shell kubectl get deployment -n knative-eventing kafka-channel-controller ``` + +The Channel Controller Config Map is used to configure the `bootstrap_servers` +of your Apache Kafka installation: +```shell +kubectl get configmap -n knative-eventing kafka-channel-controller-config +``` + +The Channel Dispatcher receives and distributes all events: +```shell +kubectl get statefulset -n knative-eventing kafka-channel-dispatcher +``` + +The Channel Dispatcher Config Map is used to send information about Channels and +Subscriptions from the Channel Controller to the Channel Dispatcher: +```shell +kubectl get configmap -n knative-eventing kafka-channel-dispatcher-config-map +``` diff --git a/config/provisioners/kafka/broker/README.md b/config/provisioners/kafka/broker/README.md index 0c75dde69b5..b47d0945734 100644 --- a/config/provisioners/kafka/broker/README.md +++ b/config/provisioners/kafka/broker/README.md @@ -1,6 +1,6 @@ # Apache Kafka - simple installation -1. For an installation of a simple Apache Kafka cluster, a setup is provided: +1. For an installation of a simple (**non production**) Apache Kafka cluster, a setup is provided: ``` kubectl create namespace kafka kubectl apply -n kafka -f kafka-broker.yaml diff --git a/config/provisioners/kafka/strimzi/README.md b/config/provisioners/kafka/strimzi/README.md deleted file mode 100644 index efd7d5437df..00000000000 --- a/config/provisioners/kafka/strimzi/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# Strimzi - Apache Kafka Operator - -[Strimzi](http://strimzi.io) makes it easy to run a production grade Apache Kafka installation on OpenShift or Kubernetes. It implements the _Kubernetes Operator pattern_ for mananging `clusters`, `topics` or `users` based on custom resource files. - -Installing the Strimzi Cluster Operator is simple and requires only a few steps. - -1. 
Create the `kafka` namespace in your Kubernetes cluster: - ``` - kubectl create namespace kafka - ``` - -1. Install the Strimzi _Cluster Operator_: - - * Applying yaml files from the [Strimzi release bundle](https://github.com/strimzi/strimzi-kafka-operator/releases/latest) - * Using the Strimzi Helm Chart - - Both ways for installing the _Cluster Operator_ are described in the [Strimzi documentation](http://strimzi.io/docs/master/#cluster-operator-str) itself - - > Note: Once this is done, you will have a `strimzi-cluster-operator` pod, which is able to install the Apache Kafka broker based on a `Kafka` custom resource file. - -1. Install the Apache Kafka cluster by providing the `kafka-persistent.yaml` Strimzi resource file from _this_ folder: - ``` - kubectl apply -f kafka-persistent.yaml -n kafka - ``` - > Note: If you want to use ephemeral storage, you have to use the `kafka-ephemeral.yaml` file. - - This provisions the complete installation of your Apache Kafka cluster. - -> Note: For learning more about Strimiz, please consult its [website](http://strimzi.io). - -Continue the configuration of Knative Eventing with [step `3`](../). 
diff --git a/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml b/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml deleted file mode 100644 index 6423bd39de9..00000000000 --- a/config/provisioners/kafka/strimzi/kafka-ephemeral.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: kafka.strimzi.io/v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - plain: {} - tls: {} - config: - offsets.topic.replication.factor: 3 - transaction.state.log.replication.factor: 3 - transaction.state.log.min.isr: 2 - storage: - type: ephemeral - zookeeper: - replicas: 1 - storage: - type: ephemeral - entityOperator: - topicOperator: {} - userOperator: {} diff --git a/config/provisioners/kafka/strimzi/kafka-persistent.yaml b/config/provisioners/kafka/strimzi/kafka-persistent.yaml deleted file mode 100644 index ea5fd60ce4d..00000000000 --- a/config/provisioners/kafka/strimzi/kafka-persistent.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: kafka.strimzi.io/v1alpha1 -kind: Kafka -metadata: - name: my-cluster -spec: - kafka: - replicas: 1 - listeners: - plain: {} - tls: {} - config: - offsets.topic.replication.factor: 3 - transaction.state.log.replication.factor: 3 - transaction.state.log.min.isr: 2 - storage: - type: persistent-claim - size: 1Gi - deleteClaim: false - zookeeper: - replicas: 1 - storage: - type: persistent-claim - size: 1Gi - deleteClaim: false - entityOperator: - topicOperator: {} - userOperator: {}