Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 61 additions & 0 deletions examples/external-traffic-policy-local.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# This example is the same as nginx-hello.yml, except that it uses
# externalTrafficPolicy: Local on the loadbalancer service.
#
# export KUBECONFIG=path/to/kubeconfig
# kubectl apply -f external-traffic-policy-local.yml
#
# Wait for `kubectl describe service local-hello` to show "Loadbalancer Ensured",
# then use the IP address found under "LoadBalancer Ingress" to connect to the
# service.
#
# You can also use the following shortcut:
#
# curl http://$(kubectl get service local-hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
#
# If you follow the nginx log, you will see that nginx sees one of the load balancer IPs as the
# source of requests:
#
# kubectl logs -l "app=local-hello"
#
# A load balancer health monitor has been configured to check which node a replica is scheduled on.
# You can use the following script to mimic the check performed by the load balancer health monitor:
#
# HEALTH_CHECK_PORT=$(kubectl get svc local-hello -o jsonpath='{.spec.healthCheckNodePort}')
# NODE_IPS=$(kubectl get nodes --selector='!node.kubernetes.io/exclude-from-external-load-balancers' -o jsonpath='{.items[*].status.addresses[1].address}')
# for NODE_IP in $NODE_IPS; do curl -s -o /dev/null -w "%{remote_ip}\t%{http_code}\t%{url_effective}\n" "http://$NODE_IP:$HEALTH_CHECK_PORT/livez"; done
#
---
# Two nginx "hello" replicas; the Service below load-balances across them
# with externalTrafficPolicy: Local.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: local-hello
spec:
  replicas: 2
  selector:
    matchLabels:
      app: local-hello
  template:
    metadata:
      labels:
        app: local-hello
    spec:
      containers:
        - name: hello
          image: nginxdemos/hello:plain-text
---
# LoadBalancer Service for the local-hello Deployment.
# externalTrafficPolicy: Local routes external traffic only to nodes that
# host a ready pod (checked via the service's healthCheckNodePort) and
# preserves the client source IP.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: local-hello
  name: local-hello
spec:
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
      name: http
  selector:
    app: local-hello
  type: LoadBalancer
  externalTrafficPolicy: Local
116 changes: 116 additions & 0 deletions examples/ingress.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
# Deploys two nginxdemos/hello:plain-text containers ("blue" and "red")
# and creates ClusterIP services named "blue-svc" and "red-svc" for the
# deployments. Additionally, an Ingress resource named "simple" is set
# up to route requests for "/red" to "red-svc" service and
# requests for "/blue" to the "blue-svc" service.
#
# This example needs ingress-nginx to work, see:
# https://kubernetes.github.io/ingress-nginx/deploy/#quick-start
#
# export KUBECONFIG=path/to/kubeconfig
# kubectl apply -f ingress.yaml
#
# Wait for `kubectl describe service ingress-nginx-controller -n ingress-nginx` to
# show "Loadbalancer Ensured", then use the IP address found under "LoadBalancer
# Ingress" to connect to the service.
#
# You can also use the following shortcut:
#
# curl http://$(kubectl get service ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')/blue
# curl http://$(kubectl get service ingress-nginx-controller -n ingress-nginx -o jsonpath='{.status.loadBalancer.ingress[0].ip}')/red
#
---
# "blue" backend: a single nginx hello replica, targeted by blue-svc.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: blue
  name: blue
spec:
  replicas: 1
  selector:
    matchLabels:
      app: blue
  template:
    metadata:
      labels:
        app: blue
    spec:
      containers:
        - image: nginxdemos/hello:plain-text
          name: hello
---
# ClusterIP Service (default type) fronting the "blue" Deployment;
# referenced by the "simple" Ingress for the /blue path.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: blue
  name: blue-svc
spec:
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
  selector:
    app: blue
---
# "red" backend: a single nginx hello replica, targeted by red-svc.
# Note: the empty `resources: { }` stanza present in the original was a
# no-op and has been dropped for consistency with the "blue" Deployment.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: red
  name: red
spec:
  replicas: 1
  selector:
    matchLabels:
      app: red
  template:
    metadata:
      labels:
        app: red
    spec:
      containers:
        - image: nginxdemos/hello:plain-text
          name: hello
---
# ClusterIP Service (default type) fronting the "red" Deployment;
# referenced by the "simple" Ingress for the /red path.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: red
  name: red-svc
spec:
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
  selector:
    app: red
---
# Routes /red to red-svc and /blue to blue-svc via ingress-nginx.
# The rewrite-target annotation rewrites the matched path to "/" before
# the request is forwarded to the backend service.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: simple
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  ingressClassName: nginx
  rules:
    - http:
        paths:
          - backend:
              service:
                name: red-svc
                port:
                  number: 80
            path: /red
            pathType: Exact
          - backend:
              service:
                name: blue-svc
                port:
                  number: 80
            path: /blue
            pathType: Exact
5 changes: 5 additions & 0 deletions examples/nginx-hello.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,11 @@
#
# curl http://$(kubectl get service hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
#
# If you follow the nginx log, you will see that nginx sees a cluster internal
# IP address as source of requests:
#
# kubectl logs -l "app=hello"
#
---
apiVersion: apps/v1
kind: Deployment
Expand Down
131 changes: 131 additions & 0 deletions examples/proxy-v2-protocol.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
# This example is the same as nginx-hello.yml, except that it uses
# the PROXYv2 protocol between the load balancer and the nginx server
# in order to preserve the original source IP.
#
# export KUBECONFIG=path/to/kubeconfig
# kubectl apply -f proxy-v2-protocol.yml
#
# Wait for `kubectl describe service proxy-hello` to show "Loadbalancer Ensured",
# then use the IP address found under "LoadBalancer Ingress" to connect to the
# service.
#
# You can also use the following shortcut:
#
# curl http://$(kubectl get service proxy-hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
#
# If you follow the nginx log, you will see that nginx sees the original source
# IP address as the source of requests (also note the nginx config injected
# using config maps):
#
# kubectl logs -l "app=proxy-hello"
#
# Starting with Kubernetes 1.30 requests originating from within the cluster work out of the box too:
#
# kubectl run -it --rm --restart=Never curl --image=curlimages/curl -- curl http://$(kubectl get service proxy-hello -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
#
---
# Two nginx replicas whose configuration is overridden via config maps so
# that nginx accepts the PROXY protocol and logs $proxy_protocol_addr as
# the client address.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: proxy-hello
spec:
  replicas: 2
  selector:
    matchLabels:
      app: proxy-hello
  template:
    metadata:
      labels:
        app: proxy-hello
    spec:
      containers:
        - name: proxy-hello
          image: nginxdemos/hello:plain-text
          volumeMounts:
            # Override the image's main nginx.conf (subPath mounts replace
            # a single file instead of shadowing the whole directory).
            - name: nginx-config-volume
              mountPath: /etc/nginx/nginx.conf
              subPath: nginx.conf
            # Override the default server block to enable proxy_protocol.
            - name: hello-plain-config-volume
              mountPath: /etc/nginx/conf.d/hello-plain-text.conf
              subPath: hello-plain-text.conf
      volumes:
        - name: nginx-config-volume
          configMap:
            name: nginx-config
        - name: hello-plain-config-volume
          configMap:
            name: hello-plain-config
---
# Main nginx configuration (mounted over /etc/nginx/nginx.conf).
# Same as the image default, except that the access log's log_format uses
# $proxy_protocol_addr instead of $remote_addr, so the client IP carried
# in the PROXY protocol header is what gets logged.
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  nginx.conf: |
    user nginx;
    worker_processes auto;

    error_log /var/log/nginx/error.log notice;
    pid /var/run/nginx.pid;


    events {
        worker_connections 1024;
    }


    http {
        include /etc/nginx/mime.types;
        default_type application/octet-stream;

        log_format main '$proxy_protocol_addr - $remote_user [$time_local] "$request" '
                        '$status $body_bytes_sent "$http_referer" '
                        '"$http_user_agent" "$http_x_forwarded_for"';

        access_log /var/log/nginx/access.log main;

        sendfile on;
        #tcp_nopush on;

        keepalive_timeout 65;

        #gzip on;

        include /etc/nginx/conf.d/*.conf;
    }
---
# Server block (mounted over /etc/nginx/conf.d/hello-plain-text.conf).
# The `proxy_protocol` parameter on the listen directives makes nginx
# expect a PROXY protocol header from the load balancer on port 80.
apiVersion: v1
kind: ConfigMap
metadata:
  name: hello-plain-config
data:
  hello-plain-text.conf: |
    server {
        listen 80 proxy_protocol;
        listen [::]:80 proxy_protocol;

        location / {
            default_type text/plain;
            expires -1;
            return 200 'Server address: $server_addr:$server_port\nServer name: $hostname\nDate: $time_local\nURI: $request_uri\nRequest ID: $request_id\n';
        }
    }
---
# LoadBalancer Service for proxy-hello. The cloudscale.ch annotation
# switches the load balancer pool to the PROXYv2 protocol, matching the
# `listen ... proxy_protocol` directives in hello-plain-config.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: proxy-hello
  name: proxy-hello
  annotations:
    k8s.cloudscale.ch/loadbalancer-pool-protocol: proxyv2
spec:
  ports:
    - port: 80
      protocol: TCP
      targetPort: 80
      name: http
  selector:
    app: proxy-hello
  type: LoadBalancer