diff --git a/pkg/cloudscale_ccm/loadbalancer.go b/pkg/cloudscale_ccm/loadbalancer.go
index 205a19d..0e927d9 100644
--- a/pkg/cloudscale_ccm/loadbalancer.go
+++ b/pkg/cloudscale_ccm/loadbalancer.go
@@ -181,6 +181,20 @@ const (
 	//
 	// Changing this annotation on an established service is considered safe.
 	LoadBalancerListenerTimeoutMemberDataMS = "k8s.cloudscale.ch/loadbalancer-timeout-member-data-ms"
+
+	// LoadBalancerListenerAllowedSubnets is a JSON list of subnet UUIDs
+	// that the loadbalancer should use. By default, all subnets of a
+	// node are used:
+
+	// * `[]` means that all subnets are used (default).
+	// * `["0769b7cf-199b-4d42-9fbd-9ab3d11d08da"]` uses only this subnet.
+	//
+	// If set, nodes without an address in a matching subnet are ignored.
+	// If no nodes with matching subnets are found, an error is returned.
+	//
+	// This is an advanced feature, useful if you have nodes that are in
+	// multiple private subnets.
+	LoadBalancerListenerAllowedSubnets = "k8s.cloudscale.ch/loadbalancer-listener-allowed-subnets"
 )
 
 type loadbalancer struct {
diff --git a/pkg/cloudscale_ccm/reconcile.go b/pkg/cloudscale_ccm/reconcile.go
index eec0fbd..97c210b 100644
--- a/pkg/cloudscale_ccm/reconcile.go
+++ b/pkg/cloudscale_ccm/reconcile.go
@@ -141,6 +141,12 @@ func desiredLbState(
 	s.pools = append(s.pools, &pool)
 
 	// For each server and private address, we need to add a pool member
+	allowedSubnets, err := serviceInfo.annotationList(
+		LoadBalancerListenerAllowedSubnets)
+	if err != nil {
+		return nil, err
+	}
+
 	for _, server := range servers {
 		for _, iface := range server.Interfaces {
 
@@ -152,6 +158,16 @@ func desiredLbState(
 
 			// Create a pool member for each address
 			for _, addr := range iface.Addresses {
+				// Networks without subnets are not supported
+				if addr.Subnet.UUID == "" {
+					continue
+				}
+
+				if len(allowedSubnets) > 0 && !slices.Contains(
+					allowedSubnets, addr.Subnet.UUID) {
+					continue
+				}
+
 				name := poolMemberName(addr.Address, nodePort)
 				s.members[&pool] = append(s.members[&pool],
 					cloudscale.LoadBalancerPoolMember{
diff --git a/pkg/cloudscale_ccm/reconcile_test.go b/pkg/cloudscale_ccm/reconcile_test.go
index f4489a0..952b3b6 100644
--- a/pkg/cloudscale_ccm/reconcile_test.go
+++ b/pkg/cloudscale_ccm/reconcile_test.go
@@ -789,3 +789,102 @@ func TestNextMonitorActions(t *testing.T) {
 		actions.UpdateMonitorNumber("1", 3, "up-threshold"),
 	})
 }
+
+func TestLimitSubnets(t *testing.T) {
+	s := testkit.NewService("service").V1()
+	s.Spec.Ports = []v1.ServicePort{
+		{
+			Protocol: "TCP",
+			Port:     80,
+			NodePort: 30080,
+		},
+	}
+
+	i := newServiceInfo(s, "")
+
+	nodes := []*v1.Node{
+		testkit.NewNode("foo").V1(),
+		testkit.NewNode("bar").V1(),
+	}
+
+	servers := []cloudscale.Server{
+		{
+			Name: "foo",
+			ZonalResource: cloudscale.ZonalResource{
+				Zone: cloudscale.Zone{Slug: "lpg1"},
+			},
+			Interfaces: []cloudscale.Interface{
+				{
+					Addresses: []cloudscale.Address{{
+						Address: "10.0.1.1",
+						Subnet: cloudscale.SubnetStub{
+							UUID: "00000000-0000-0000-0000-000000000001",
+						},
+					}},
+				},
+				{
+					Addresses: []cloudscale.Address{{
+						Address: "10.0.2.1",
+						Subnet: cloudscale.SubnetStub{
+							UUID: "00000000-0000-0000-0000-000000000002",
+						},
+					}},
+				},
+			},
+		},
+		{
+			Name: "bar",
+			ZonalResource: cloudscale.ZonalResource{
+				Zone: cloudscale.Zone{Slug: "lpg1"},
+			},
+			Interfaces: []cloudscale.Interface{
+				{
+					Addresses: []cloudscale.Address{{
+						Address: "10.0.1.2",
+						Subnet: cloudscale.SubnetStub{
+							UUID: "00000000-0000-0000-0000-000000000001",
+						},
+					}},
+				},
+				{
+					Addresses: []cloudscale.Address{{
+						Address: "10.0.2.2",
+						Subnet: cloudscale.SubnetStub{
+							UUID: "00000000-0000-0000-0000-000000000002",
+						},
+					}},
+				},
+			},
+		},
+	}
+
+	// By default, we get two pool members per server
+	state, err := desiredLbState(i, nodes, servers)
+	assert.NoError(t, err)
+	assert.Len(t, state.pools, 1)
+	assert.Len(t, state.members[state.pools[0]], 4)
+	assert.Equal(t, "10.0.1.1", state.members[state.pools[0]][0].Address)
+	assert.Equal(t, "10.0.2.1", state.members[state.pools[0]][1].Address)
+	assert.Equal(t, "10.0.1.2", state.members[state.pools[0]][2].Address)
+	assert.Equal(t, "10.0.2.2", state.members[state.pools[0]][3].Address)
+
+	// We can limit those pool members
+	s.Annotations = make(map[string]string)
+	s.Annotations[LoadBalancerListenerAllowedSubnets] = `
+		["00000000-0000-0000-0000-000000000001"]`
+
+	// Now we should see half the pool members
+	state, err = desiredLbState(i, nodes, servers)
+	assert.NoError(t, err)
+	assert.Len(t, state.pools, 1)
+	assert.Len(t, state.members[state.pools[0]], 2)
+	assert.Equal(t, "10.0.1.1", state.members[state.pools[0]][0].Address)
+	assert.Equal(t, "10.0.1.2", state.members[state.pools[0]][1].Address)
+
+	// If we have no valid addresses, we get an error
+	s.Annotations[LoadBalancerListenerAllowedSubnets] = `
+		["00000000-0000-0000-0000-000000000003"]`
+
+	_, err = desiredLbState(i, nodes, servers)
+	assert.Error(t, err)
+}
diff --git a/pkg/cloudscale_ccm/service_info.go b/pkg/cloudscale_ccm/service_info.go
index 577d3db..7e31935 100644
--- a/pkg/cloudscale_ccm/service_info.go
+++ b/pkg/cloudscale_ccm/service_info.go
@@ -110,6 +110,8 @@ func (s serviceInfo) annotation(key string) string {
 		return s.annotationOrDefault(key, "5000")
 	case LoadBalancerListenerTimeoutMemberDataMS:
 		return s.annotationOrDefault(key, "50000")
+	case LoadBalancerListenerAllowedSubnets:
+		return s.annotationOrDefault(key, "[]")
 	default:
 		return s.annotationOrElse(key, func() string {
 			klog.Warning("unknown annotation:", key)
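
Note: the reconcile changes above rely on `serviceInfo.annotationList`, whose implementation is not part of this diff. As a rough sketch only, assuming the helper sits next to `annotation` in `service_info.go` and that `encoding/json` and `fmt` are imported, it might look like this (the actual implementation may differ):

```go
// annotationList reads the given annotation (including its default,
// which is "[]" for LoadBalancerListenerAllowedSubnets) and parses it
// as a JSON list of strings. An empty list means "no restriction" to
// the callers in reconcile.go. This is a sketch, not the code in the
// repository.
func (s serviceInfo) annotationList(key string) ([]string, error) {
	var list []string

	value := s.annotation(key)
	if err := json.Unmarshal([]byte(value), &list); err != nil {
		return nil, fmt.Errorf(
			"annotation %s is not a valid JSON list of strings: %w",
			key, err)
	}

	return list, nil
}
```

Since `json.Unmarshal` skips leading whitespace, the indented multi-line annotation values used in `TestLimitSubnets` would parse cleanly with such a helper.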