diff --git a/cmd/fanoutsidecar/kodata/VENDOR-LICENSE b/cmd/fanoutsidecar/kodata/VENDOR-LICENSE index 91c1d5bb8f3..3cc89764519 120000 --- a/cmd/fanoutsidecar/kodata/VENDOR-LICENSE +++ b/cmd/fanoutsidecar/kodata/VENDOR-LICENSE @@ -1 +1 @@ -../../../VENDOR-LICENSE \ No newline at end of file +../../../third_party/VENDOR-LICENSE \ No newline at end of file diff --git a/config/provisioners/in-memory-channel/in-memory-channel.yaml b/config/provisioners/in-memory-channel/in-memory-channel.yaml index 44a15c66175..d9ad1468a72 100644 --- a/config/provisioners/in-memory-channel/in-memory-channel.yaml +++ b/config/provisioners/in-memory-channel/in-memory-channel.yaml @@ -181,3 +181,259 @@ spec: - --config_map_noticer=watcher - --config_map_namespace=knative-eventing - --config_map_name=in-memory-channel-dispatcher-config-map + +--- + +# From here to the end of the file is essentially a copied version of +# https://github.com/knative/serving/blob/master/config/202-gateway.yaml, with the names and labels +# changed. It also switches the Service from LoadBalancer -> ClusterIP so that it cannot be +# reached from outside the cluster. +# +# Ideally we would not copy paste this much code, which has to stay in-sync with Istio itself. +# +# Overall we are creating a Gateway, the Service that backs it, the Deployment that backs that +# Service, and the Autoscaler that scales the Deployment. 
+ +apiVersion: networking.istio.io/v1alpha3 +kind: Gateway +metadata: + name: knative-eventing-private-ingressgateway + namespace: istio-system +spec: + selector: + knative-eventing: private-ingressgateway + servers: + - port: + number: 80 + name: http + protocol: HTTP + hosts: + - "*" + - port: + number: 443 + name: https + protocol: HTTPS + hosts: + - "*" + tls: + mode: PASSTHROUGH + +--- + + +apiVersion: v1 +kind: Service +metadata: + name: knative-eventing-private-ingressgateway + namespace: istio-system + annotations: + labels: + chart: gateways-1.0.1 + release: RELEASE-NAME + heritage: Tiller + app: knative-eventing-private-ingressgateway + knative-eventing: private-ingressgateway +spec: + type: ClusterIP + selector: + app: knative-eventing-private-ingressgateway + knative-eventing: private-ingressgateway + ports: + - + name: http2 + port: 80 + targetPort: 80 + - + name: https + port: 443 + - + name: tcp + port: 31400 + - + name: tcp-pilot-grpc-tls + port: 15011 + targetPort: 15011 + - + name: tcp-citadel-grpc-tls + port: 8060 + targetPort: 8060 + - + name: tcp-dns-tls + port: 853 + targetPort: 853 + - + name: http2-prometheus + port: 15030 + targetPort: 15030 + - + name: http2-grafana + port: 15031 +--- +# This is the corresponding Deployment to back the aforementioned Service. 
+apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: knative-eventing-private-ingressgateway + namespace: istio-system + labels: + chart: gateways-1.0.1 + release: RELEASE-NAME + heritage: Tiller + app: knative-eventing-private-ingressgateway + knative-eventing: private-ingressgateway +spec: + replicas: 1 + selector: + matchLabels: &labels + app: knative-eventing-private-ingressgateway + knative-eventing: private-ingressgateway + template: + metadata: + labels: *labels + annotations: + sidecar.istio.io/inject: "false" + scheduler.alpha.kubernetes.io/critical-pod: "" + spec: + serviceAccountName: istio-ingressgateway-service-account + containers: + - name: istio-proxy + image: "docker.io/istio/proxyv2:1.0.2" + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + - containerPort: 443 + - containerPort: 31400 + - containerPort: 15011 + - containerPort: 8060 + - containerPort: 853 + - containerPort: 15030 + - containerPort: 15031 + args: + - proxy + - router + - -v + - "2" + - --discoveryRefreshDelay + - '1s' #discoveryRefreshDelay + - --drainDuration + - '45s' #drainDuration + - --parentShutdownDuration + - '1m0s' #parentShutdownDuration + - --connectTimeout + - '10s' #connectTimeout + - --serviceCluster + - knative-ingressgateway + - --zipkinAddress + - zipkin:9411 + - --statsdUdpAddress + - istio-statsd-prom-bridge:9125 + - --proxyAdminPort + - "15000" + - --controlPlaneAuthPolicy + - NONE + - --discoveryAddress + - istio-pilot:8080 + resources: + requests: + cpu: 10m + + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: INSTANCE_IP + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: status.podIP + - name: ISTIO_META_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + volumeMounts: + - name: istio-certs + mountPath: /etc/certs + readOnly: true + - name: ingressgateway-certs + 
mountPath: "/etc/istio/ingressgateway-certs" + readOnly: true + - name: ingressgateway-ca-certs + mountPath: "/etc/istio/ingressgateway-ca-certs" + readOnly: true + volumes: + - name: istio-certs + secret: + secretName: istio.istio-ingressgateway-service-account + optional: true + - name: ingressgateway-certs + secret: + secretName: "istio-ingressgateway-certs" + optional: true + - name: ingressgateway-ca-certs + secret: + secretName: "istio-ingressgateway-ca-certs" + optional: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - ppc64le + - s390x + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - amd64 + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - ppc64le + - weight: 2 + preference: + matchExpressions: + - key: beta.kubernetes.io/arch + operator: In + values: + - s390x +--- + +# This is the horizontal pod autoscaler to make sure the ingress Pods +# scale up to meet traffic demand. +# +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: knative-eventing-private-ingressgateway + namespace: istio-system +spec: + # TODO(1411): Document/fix this. We are choosing an arbitrary 10 here. 
+ maxReplicas: 10 + minReplicas: 1 + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: knative-eventing-private-ingressgateway + metrics: + - type: Resource + resource: + name: cpu + targetAverageUtilization: 60 diff --git a/pkg/controller/eventing/inmemory/channel/reconcile.go b/pkg/controller/eventing/inmemory/channel/reconcile.go index 439361e8978..234041f0e65 100644 --- a/pkg/controller/eventing/inmemory/channel/reconcile.go +++ b/pkg/controller/eventing/inmemory/channel/reconcile.go @@ -41,9 +41,10 @@ import ( ) const ( - portName = "http" portNumber = 80 finalizerName = controllerAgentName + + privateEventingIngressGateway = "knative-eventing-private-ingressgateway.istio-system.svc.cluster.local" ) type reconciler struct { @@ -257,12 +258,9 @@ func newK8sService(c *eventingv1alpha1.Channel) *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: portName, - Port: portNumber, - }, - }, + Type: corev1.ServiceTypeExternalName, + // Keep in sync with the Gateway in newVirtualService(). 
+ ExternalName: privateEventingIngressGateway, }, } } @@ -290,6 +288,10 @@ func newVirtualService(channel *eventingv1alpha1.Channel) *istiov1alpha3.Virtual }, }, Spec: istiov1alpha3.VirtualServiceSpec{ + Gateways: []string{ + privateEventingIngressGateway, + "mesh", + }, Hosts: []string{ controller.ServiceHostName(controller.ChannelServiceName(channel.Name), channel.Namespace), controller.ChannelHostName(channel.Name, channel.Namespace), diff --git a/pkg/controller/eventing/inmemory/channel/reconcile_test.go b/pkg/controller/eventing/inmemory/channel/reconcile_test.go index 13466bad4ae..50683024585 100644 --- a/pkg/controller/eventing/inmemory/channel/reconcile_test.go +++ b/pkg/controller/eventing/inmemory/channel/reconcile_test.go @@ -589,12 +589,8 @@ func makeK8sService() *corev1.Service { }, }, Spec: corev1.ServiceSpec{ - Ports: []corev1.ServicePort{ - { - Name: portName, - Port: portNumber, - }, - }, + Type: corev1.ServiceTypeExternalName, + ExternalName: privateEventingIngressGateway, }, } } @@ -630,6 +626,10 @@ func makeVirtualService() *istiov1alpha3.VirtualService { }, }, Spec: istiov1alpha3.VirtualServiceSpec{ + Gateways: []string{ + privateEventingIngressGateway, + "mesh", + }, Hosts: []string{ fmt.Sprintf("%s-channel.%s.svc.cluster.local", cName, cNamespace), fmt.Sprintf("%s.%s.channels.cluster.local", cName, cNamespace),