diff --git a/examples/external-traffic-policy-local.yml b/examples/external-traffic-policy-local.yml new file mode 100644 index 0000000..8f4e271 --- /dev/null +++ b/examples/external-traffic-policy-local.yml @@ -0,0 +1,61 @@ +# This example is the same as nginx-hello.yml, except that it uses +# externalTrafficPolicy: Local on the loadbalancer service. +# +# export KUBECONFIG=path/to/kubeconfig +# kubectl apply -f external-traffic-policy-local.yml +# +# Wait for `kubectl describe service local-hello` to show "Loadbalancer Ensured", +# then use the IP address found under "LoadBalancer Ingress" to connect to the +# service. +# +# You can also use the following shortcut: +# +# curl http://$(kubectl get service local-hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +# +# If you follow the nginx log, you will see that nginx sees one of the load balancer IPs as the source of +# requests: +# +# kubectl logs -l "app=local-hello" +# +# A load balancer health monitor has been configured to check which node a replica is scheduled on. 
+# You can use the following script to mimic the check performed by the load balancer health monitor: +# +# HEALTH_CHECK_PORT=$(kubectl get svc local-hello -o jsonpath='{.spec.healthCheckNodePort}') +# NODE_IPS=$(kubectl get nodes --selector='!node.kubernetes.io/exclude-from-external-load-balancers' -o jsonpath='{.items[*].status.addresses[1].address}') +# for NODE_IP in $NODE_IPS; do curl -s -o /dev/null -w "%{remote_ip}\t%{http_code}\t%{url_effective}\n" "http://$NODE_IP:$HEALTH_CHECK_PORT/livez"; done +# +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: local-hello +spec: + replicas: 2 + selector: + matchLabels: + app: local-hello + template: + metadata: + labels: + app: local-hello + spec: + containers: + - name: hello + image: nginxdemos/hello:plain-text +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: local-hello + name: local-hello +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + name: http + selector: + app: local-hello + type: LoadBalancer + externalTrafficPolicy: Local diff --git a/examples/ingress.yaml b/examples/ingress.yaml new file mode 100644 index 0000000..25b1bbb --- /dev/null +++ b/examples/ingress.yaml @@ -0,0 +1,116 @@ +# Deploys two nginxdemos/hello:plain-text containers ("blue" and "red") +# and creates ClusterIP services named "blue-svc" and "red-svc" for the +# deployments. Additionally, an Ingress resource named "simple" is set +# up to route requests for "/red" to the "red-svc" service and +# requests for "/blue" to the "blue-svc" service. +# +# This example needs ingress-nginx to work, see: +# https://kubernetes.github.io/ingress-nginx/deploy/#quick-start +# +# export KUBECONFIG=path/to/kubeconfig +# kubectl apply -f ingress.yaml +# +# Wait for `kubectl describe service ingress-nginx-controller -n ingress-nginx` to +# show "Loadbalancer Ensured", then use the IP address found under "LoadBalancer +# Ingress" to connect to the service. 
+# +# You can also use the following shortcut: +# +# curl http://$(kubectl get service ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')/blue +# curl http://$(kubectl get service ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')/red +# +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: blue + name: blue +spec: + replicas: 1 + selector: + matchLabels: + app: blue + template: + metadata: + labels: + app: blue + spec: + containers: + - image: nginxdemos/hello:plain-text + name: hello +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: blue + name: blue-svc +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: blue +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: red + name: red +spec: + replicas: 1 + selector: + matchLabels: + app: red + template: + metadata: + labels: + app: red + spec: + containers: + - image: nginxdemos/hello:plain-text + name: hello + resources: { } +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: red + name: red-svc +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + selector: + app: red +--- +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: simple + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + ingressClassName: nginx + rules: + - http: + paths: + - backend: + service: + name: red-svc + port: + number: 80 + path: /red + pathType: Exact + - backend: + service: + name: blue-svc + port: + number: 80 + path: /blue + pathType: Exact diff --git a/examples/nginx-hello.yml b/examples/nginx-hello.yml index 407365e..306a9fb 100644 --- a/examples/nginx-hello.yml +++ b/examples/nginx-hello.yml @@ -12,6 +12,11 @@ # # curl http://$(kubectl get service hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}') # +# If you follow the nginx log, you will see that nginx sees a cluster-internal +# IP address as the source of 
requests: +# +# kubectl logs -l "app=hello" +# --- apiVersion: apps/v1 kind: Deployment diff --git a/examples/proxy-v2-protocol.yml b/examples/proxy-v2-protocol.yml new file mode 100644 index 0000000..8bf2e38 --- /dev/null +++ b/examples/proxy-v2-protocol.yml @@ -0,0 +1,131 @@ +# This example is the same as nginx-hello.yml, except that it uses +# the PROXYv2 protocol between the load balancer and the nginx server +# in order to preserve the original source IP. +# +# export KUBECONFIG=path/to/kubeconfig +# kubectl apply -f proxy-v2-protocol.yml +# +# Wait for `kubectl describe service proxy-hello` to show "Loadbalancer Ensured", +# then use the IP address found under "LoadBalancer Ingress" to connect to the +# service. +# +# You can also use the following shortcut: +# +# curl http://$(kubectl get service proxy-hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +# +# If you follow the nginx log, you will see that nginx sees the original source +# IP address as source of requests (also note the nginx config injected using +# config maps): +# +# kubectl logs -l "app=proxy-hello" +# +# Starting with Kubernetes 1.30, requests originating from within the cluster work out of the box too: +# +# kubectl run -it --rm --restart=Never curl --image=curlimages/curl -- curl http://$(kubectl get service proxy-hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +# +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: proxy-hello +spec: + replicas: 2 + selector: + matchLabels: + app: proxy-hello + template: + metadata: + labels: + app: proxy-hello + spec: + containers: + - name: proxy-hello + image: nginxdemos/hello:plain-text + volumeMounts: + - name: nginx-config-volume + mountPath: /etc/nginx/nginx.conf + subPath: nginx.conf + - name: hello-plain-config-volume + mountPath: /etc/nginx/conf.d/hello-plain-text.conf + subPath: hello-plain-text.conf + volumes: + - name: nginx-config-volume + configMap: + name: nginx-config + - name: hello-plain-config-volume + 
configMap: + name: hello-plain-config +--- +apiVersion: v1 +data: + nginx.conf: |2 + + user nginx; + worker_processes auto; + + error_log /var/log/nginx/error.log notice; + pid /var/run/nginx.pid; + + + events { + worker_connections 1024; + } + + + http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$proxy_protocol_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + + keepalive_timeout 65; + + #gzip on; + + include /etc/nginx/conf.d/*.conf; + } +kind: ConfigMap +metadata: + name: nginx-config +--- +apiVersion: v1 +data: + hello-plain-text.conf: | + server { + listen 80 proxy_protocol; + listen [::]:80 proxy_protocol; + + location / { + default_type text/plain; + expires -1; + return 200 'Server address: $server_addr:$server_port\nServer name: $hostname\nDate: $time_local\nURI: $request_uri\nRequest ID: $request_id\n'; + } + } +kind: ConfigMap +metadata: + name: hello-plain-config +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: proxy-hello + name: proxy-hello + annotations: + k8s.cloudscale.ch/loadbalancer-pool-protocol: proxyv2 +spec: + ports: + - port: 80 + protocol: TCP + targetPort: 80 + name: http + selector: + app: proxy-hello + type: LoadBalancer