@@ -4,7 +4,95 @@

# Master nodes Gateways
{{- range $i, $lb := .Values.loadBalancers.yugabyteMasterNodes }}

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: yb-proxy-config-{{$i}}
data:
  haproxy.cfg: |
    global
      log stdout format raw local0
      maxconn 4096

    defaults
      mode tcp
      log global
      # We set high timeouts to avoid disconnects during periods of low activity
      timeout client 12h
      timeout server 12h
      timeout tunnel 12h
      timeout connect 5s
      # We enable TCP keep-alives on both the client and server side
      option clitcpka
      option srvtcpka
      # K8s services may not be ready when HAProxy starts, so we ignore resolution errors
      default-server init-addr libc,none

    resolvers dns
      parse-resolv-conf
      # We limit DNS validity to 5s to react quickly to changes in K8s services
      hold valid 5s

    frontend master-grpc-f
      bind :7100
      default_backend master-grpc-b

    backend master-grpc-b
      server yb-master-{{$i}} yb-master-{{$i}}.yb-masters.default.svc.cluster.local:7100 check resolvers dns

    frontend tserver-grpc-f
      bind :9100
      default_backend tserver-grpc-b

    backend tserver-grpc-b
      server yb-tserver-{{$i}} yb-tserver-{{$i}}.yb-tservers.default.svc.cluster.local:9100 check resolvers dns

---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    name: yugabyte-proxy-{{$i}}
  name: yugabyte-proxy-{{$i}}
spec:
  replicas: 2 # We deploy two instances to provide resilience if one Kubernetes node goes down.
  selector:
    matchLabels:
      app: yugabyte-proxy-{{$i}}
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: yugabyte-proxy-{{$i}}
      annotations:
        release: {{$.Release.Name}}/{{$.Release.Revision}}
    spec:
      containers:
        - name: yugabyte-proxy
          image: "haproxy:3.3"
          imagePullPolicy: "Always"
          ports:
            - containerPort: 7100
              name: master-grpc
            - containerPort: 9100
              name: tserver-grpc
          volumeMounts:
            - name: config-volume
              mountPath: /usr/local/etc/haproxy/
              readOnly: true
      volumes:
        - name: config-volume
          configMap:
            name: yb-proxy-config-{{$i}}

---

apiVersion: v1
kind: Service
metadata:
@@ -33,8 +121,7 @@ spec:
      targetPort: 9100
  publishNotReadyAddresses: true
  selector:
    yugabytedUi: "true"
    apps.kubernetes.io/pod-index: "{{$i}}"
    app: yugabyte-proxy-{{$i}}
  type: LoadBalancer
{{- end }}
{{- end }}
113 changes: 110 additions & 3 deletions deploy/services/tanka/yugabyte-auxiliary.libsonnet
@@ -100,7 +100,7 @@ local yugabyteLB(metadata, name, ip) =
--placement_zone=%s
--use_private_ip=zone
--node_to_node_encryption_use_client_certificates=true
--ysql_hba_conf_csv='hostssl all all 0.0.0.0/0 cert'
--ysql_hba_conf_csv=hostssl all all 0.0.0.0/0 cert
||| % [
std.join(",", metadata.yugabyte.masterAddresses),
metadata.yugabyte.tserver.rpc_bind_addresses,
@@ -202,8 +201,7 @@ local yugabyteLB(metadata, name, ip) =
        },
        spec+: {
          selector: {
            yugabytedUi: "true",
            "apps.kubernetes.io/pod-index": '' + i,
            app: 'yugabyte-proxy-' + i
          },
          publishNotReadyAddresses: true,
          ports: [
@@ -218,6 +217,114 @@
          ],
        },
      } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
    },
    ProxyConfig: {
      ["yb-proxy-config-" + i]: base.ConfigMap(metadata, 'yb-proxy-config-' + i) {
        data: {
          "haproxy.cfg": |||
            global
              log stdout format raw local0
              maxconn 4096

            defaults
              mode tcp
              log global
              # We set high timeouts to avoid disconnects during periods of low activity
              timeout client 12h
              timeout server 12h
              timeout tunnel 12h
              timeout connect 5s
              # We enable TCP keep-alives on both the client and server side
              option clitcpka
              option srvtcpka
              # K8s services may not be ready when HAProxy starts, so we ignore resolution errors
              default-server init-addr libc,none

            resolvers dns
              parse-resolv-conf
              # We limit DNS validity to 5s to react quickly to changes in K8s services
              hold valid 5s

            frontend master-grpc-f
              bind :7100
              default_backend master-grpc-b

            backend master-grpc-b
              server yb-master-%s yb-master-%s.yb-masters.%s.svc.cluster.local:7100 check resolvers dns

            frontend tserver-grpc-f
              bind :9100
              default_backend tserver-grpc-b

            backend tserver-grpc-b
              server yb-tserver-%s yb-tserver-%s.yb-tservers.%s.svc.cluster.local:9100 check resolvers dns
          ||| % [i, i, metadata.namespace, i, i, metadata.namespace]
        }
      } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
    },
    Proxy: {
      ["yugabyte-proxy-" + i]: base.Deployment(metadata, 'yugabyte-proxy-' + i) {
        apiVersion: 'apps/v1',
        kind: 'Deployment',
        metadata+: {
          namespace: metadata.namespace,
          labels: {
            name: 'yugabyte-proxy-' + i
          }
        },
        spec+: {
          replicas: 2, # We deploy two instances to provide resilience if one node goes down.
          selector: {
            matchLabels: {
              app: 'yugabyte-proxy-' + i
            }
          },
          strategy: {
            rollingUpdate: {
              maxSurge: "25%",
              maxUnavailable: "25%",
            },
            type: "RollingUpdate",
          },
          template+: {
            metadata+: {
              labels: {
                app: 'yugabyte-proxy-' + i
              }
            },
            spec+: {
              volumes: [
                {
                  name: "config-volume",
                  configMap: {
                    name: "yb-proxy-config-" + i,
                  }
                }
              ],
              soloContainer:: base.Container('yugabyte-proxy') {
                image: "haproxy:3.3",
                imagePullPolicy: 'Always',
                ports: [
                  {
                    containerPort: 7100,
                    name: 'master-grpc',
                  },
                  {
                    containerPort: 9100,
                    name: 'tserver-grpc',
                  },
                ],
                volumeMounts: [
                  {
                    name: "config-volume",
                    mountPath: "/usr/local/etc/haproxy/",
                  }
                ],
              },
            },
          },
        },
      } for i in std.range(0, std.length(metadata.yugabyte.tserverNodeIPs) - 1)
    },
  } else {}
}
23 changes: 20 additions & 3 deletions docs/architecture.md
@@ -11,12 +11,29 @@ multiple organizations to each host one DSS instance that is interoperable with
each other organization's DSS instance. A DSS pool with three participating
organizations (USSs) will have an architecture similar to the diagram below.

_**Note** that the diagram shows 2 stateful sets per DSS instance. Currently, the
helm and tanka deployments produce 3 stateful sets per DSS instance. However, after
Issue #481 is resolved, this is expected to be reduced to 2 stateful sets._
_**Note** that the diagrams below show 2 stateful sets per DSS instance. Currently, the helm and tanka deployments produce 3 stateful sets per DSS instance. However, after Issue #481 is resolved, this is expected to be reduced to 2 stateful sets._

### Certificates

This diagram shows how certificates are shared. It applies to both CockroachDB and Yugabyte deployments.

![Pool architecture diagram](assets/generated/pool_architecture_certs.png)

### CockroachDB

![Pool architecture diagram](assets/generated/pool_architecture.png)

### Yugabyte

Detail at the instance level:
![Pool architecture diagram with Yugabyte](assets/generated/pool_architecture_yugabyte_instance.png)

Top-level simplified view, with one replica shown and the Yugabyte services grouped into a single box:
![Pool architecture diagram with Yugabyte](assets/generated/pool_architecture_yugabyte.png)

To reduce the number of required public load balancers, we use an intermediate reverse proxy to expose the Yugabyte master and tserver ports on a single shared public IP per stateful set instance.
Standard Kubernetes load balancers cannot route connections to different backends based on the destination port out of the box, so the reverse proxy dispatches incoming connections to the two services according to the port they arrive on.
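
For illustration, the HAProxy configuration added in this change dispatches on the destination port roughly as follows. This is only a sketch for stateful set index 0; the authoritative configuration is the `haproxy.cfg` generated by the Helm and tanka templates, and the service names assume the default `yb-masters`/`yb-tservers` headless services in the `default` namespace:

```
# Connections arriving on :7100 are forwarded to the yb-master pod of this index
frontend master-grpc-f
    bind :7100
    default_backend master-grpc-b

backend master-grpc-b
    server yb-master-0 yb-master-0.yb-masters.default.svc.cluster.local:7100 check resolvers dns

# Connections arriving on :9100 are forwarded to the matching yb-tserver pod
frontend tserver-grpc-f
    bind :9100
    default_backend tserver-grpc-b

backend tserver-grpc-b
    server yb-tserver-0 yb-tserver-0.yb-tservers.default.svc.cluster.local:9100 check resolvers dns
```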

### Terminology notes

See [terminology notes](operations/pooling.md#terminology-notes).
Binary file modified docs/assets/generated/pool_architecture.png