1 change: 1 addition & 0 deletions .gitignore
@@ -28,6 +28,7 @@ ut-coverage.xml

# editor and IDE paraphernalia
.idea
.DS_Store
*.swp
*.swo
*~
22 changes: 22 additions & 0 deletions hack/loadtest/README.md
@@ -0,0 +1,22 @@
Here is how to run the load test locally. Make sure that you have Go installed and have cloned the repo.

1. Build a fleet.

You can use any Kubernetes clusters you have and install the fleet agents on them. In this example, we built a fleet with four member clusters, cluster-1 through cluster-4.
Please remember to save the kubeconfig file pointing to the hub cluster of the fleet.

2. Run the load test binary locally.
```shell
export KUBECONFIG=xxxxx
go run hack/loadtest/main.go -max-current-placement 10 --cluster cluster-1 --cluster cluster-2 --cluster cluster-3 --cluster cluster-4
```

3. Manually check the metrics produced by the load test.
```shell
curl http://localhost:4848/metrics | grep workload
```

4. Use a local Prometheus to draw graphs. Download the Prometheus binary for your machine and start it:
```shell
./prometheus --config.file=hack/loadtest/prometheus.yml
```
119 changes: 119 additions & 0 deletions hack/loadtest/main.go
@@ -0,0 +1,119 @@
/*
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
*/

package main

import (
	"context"
	"flag"
	"net/http"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"k8s.io/apimachinery/pkg/runtime"
	utilrand "k8s.io/apimachinery/pkg/util/rand"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/config"
	workv1alpha1 "sigs.k8s.io/work-api/pkg/apis/v1alpha1"

	fleetv1alpha1 "go.goms.io/fleet/apis/v1alpha1"
	"go.goms.io/fleet/hack/loadtest/util"
)

var (
	scheme = runtime.NewScheme()
)

var (
	placementDeadline   = flag.Int("placement-deadline-second", 300, "The deadline for a placement to be applied (in seconds).")
	pollInterval        = flag.Int("poll-interval-millisecond", 250, "The poll interval for verification (in milliseconds).")
	maxCurrentPlacement = flag.Int("max-current-placement", 10, "The maximum number of placements to run concurrently.")
	loadTestLength      = flag.Int("load-test-length-minute", 15, "The length of the load test in minutes.")
	clusterNames        util.ClusterNames
)

func init() {
	klog.InitFlags(nil)
	utilrand.Seed(time.Now().UnixNano())

	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(fleetv1alpha1.AddToScheme(scheme))
	utilruntime.Must(workv1alpha1.AddToScheme(scheme))
	//+kubebuilder:scaffold:scheme
}

func main() {
	flag.Var(&clusterNames, "cluster", "The name of a member cluster; repeat the flag for multiple clusters.")
	flag.Parse()
	defer klog.Flush()

	klog.InfoS("start to run placement load test", "pollInterval", *pollInterval, "placementDeadline", *placementDeadline, "maxCurrentPlacement", *maxCurrentPlacement, "clusterNames", clusterNames)
	config := config.GetConfigOrDie()
	config.QPS, config.Burst = float32(100), 500
	hubClient, err := client.New(config, client.Options{
		Scheme: scheme,
	})
	if err != nil {
		panic(err)
	}
	ctx := ctrl.SetupSignalHandler()
	if err = util.ApplyClusterScopeManifests(ctx, hubClient); err != nil {
		panic(err)
	}
	loadTestCtx, canFunc := context.WithDeadline(ctx, time.Now().Add(time.Minute*time.Duration(*loadTestLength)))
	defer canFunc()
	// run the load test in the background
	go runLoadTest(loadTestCtx, config)
	// set up the Prometheus metrics endpoint
	http.Handle("/metrics", promhttp.Handler())
	if err = http.ListenAndServe(":4848", nil); err != nil {
		panic(err)
	}
}

func runLoadTest(ctx context.Context, config *rest.Config) {
	var wg sync.WaitGroup
	wg.Add(*maxCurrentPlacement)
	for i := 0; i < *maxCurrentPlacement; i++ {
		go func() {
			defer wg.Done()
			// each goroutine uses a separate client to avoid client-side throttling; start each one
			// with a jitter to avoid creating too many clients at the same time.
			time.Sleep(time.Millisecond * time.Duration(utilrand.Intn(100**maxCurrentPlacement)))
			hubClient, err := client.New(config, client.Options{
				Scheme: scheme,
			})
			if err != nil {
				panic(err)
			}
			// continuously apply and delete resources
			for {
				select {
				case <-ctx.Done():
					return
				default:
					if err = util.MeasureOnePlacement(ctx, hubClient, time.Duration(*placementDeadline)*time.Second, time.Duration(*pollInterval)*time.Millisecond, *maxCurrentPlacement, clusterNames); err != nil {
						klog.ErrorS(err, "placement load test failed")
					}
				}
			}
		}()
	}
	wg.Wait()
	hubClient, err := client.New(config, client.Options{
		Scheme: scheme,
	})
	if err != nil {
		klog.ErrorS(err, "failed to create the hub client for cleanup")
		return
	}
	if err := util.CleanupAll(hubClient); err != nil {
		klog.ErrorS(err, "clean up placement load test hit an error")
	}
	util.PrintTestMetrics()
	klog.InfoS("placement load test finished; for more metrics, please use Prometheus")
}
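
// Note: util.ClusterNames is defined in the util package, which is outside
// this diff. For flag.Var above to accept it, it must implement flag.Value;
// a minimal sketch (an assumption, not necessarily the actual implementation)
// would be:
//
//	type ClusterNames []string
//
//	func (c *ClusterNames) String() string { return strings.Join(*c, ",") }
//
//	func (c *ClusterNames) Set(val string) error {
//		*c = append(*c, val)
//		return nil
//	}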
17 changes: 17 additions & 0 deletions hack/loadtest/manifests/endpoint-slice.yaml
@@ -0,0 +1,17 @@
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
  labels:
    service-name: test-nginx-export
  name: test-user-created-endpointslice
  namespace: app
addressType: IPv4
ports:
  - name: https
    port: 443
    protocol: TCP
endpoints:
  - addresses:
      - 20.106.105.216
    conditions:
      ready: true
12 changes: 12 additions & 0 deletions hack/loadtest/manifests/endpoints.yaml
@@ -0,0 +1,12 @@
apiVersion: v1
kind: Endpoints
metadata:
  name: user-created-endpoint
  namespace: app
subsets:
  - addresses:
      - ip: 20.106.105.216
    ports:
      - name: https
        port: 443
        protocol: TCP
25 changes: 25 additions & 0 deletions hack/loadtest/manifests/test-cloneset.yaml
@@ -0,0 +1,25 @@
apiVersion: apps.kruise.io/v1alpha1
kind: CloneSet
metadata:
  name: guestbook-clone
  namespace: app
spec:
  replicas: 20
  selector:
    matchLabels:
      app.kubernetes.io/name: guestbook-clone
  template:
    metadata:
      labels:
        app.kubernetes.io/name: guestbook-clone
    spec:
      containers:
        - name: guestbook
          image: openkruise/guestbook:v1
          imagePullPolicy: Always
          ports:
            - name: http-server
              containerPort: 3000
  updateStrategy:
    type: InPlaceIfPossible
    maxUnavailable: 3
11 changes: 11 additions & 0 deletions hack/loadtest/manifests/test-configmap-2.yaml
@@ -0,0 +1,11 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-configmap-2
  namespace: app
  labels:
    fleet.azure.com/name: app
data:
  field1: one
  field2: two
  field3: three
9 changes: 9 additions & 0 deletions hack/loadtest/manifests/test-configmap.yaml
@@ -0,0 +1,9 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: test-configmap
  namespace: app
data:
  fielda: one
  fieldb: two
  fieldc: three
9 changes: 9 additions & 0 deletions hack/loadtest/manifests/test-role.yaml
@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: app
  name: test-pod-reader
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources: ["pods"]
    verbs: ["get", "watch", "list"]
17 changes: 17 additions & 0 deletions hack/loadtest/manifests/test-rolebinding.yaml
@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
# This role binding allows "jane" to read pods in the "app" namespace.
# You need to already have a Role named "test-pod-reader" in that namespace.
kind: RoleBinding
metadata:
  name: read-pods
  namespace: app
subjects:
  # You can specify more than one "subject"
  - kind: User
    name: jane # "name" is case sensitive
    apiGroup: rbac.authorization.k8s.io
roleRef:
  # "roleRef" specifies the binding to a Role / ClusterRole
  kind: Role # this must be Role or ClusterRole
  apiGroup: rbac.authorization.k8s.io
  name: test-pod-reader # this must match the name of the Role or ClusterRole you wish to bind to
8 changes: 8 additions & 0 deletions hack/loadtest/manifests/test-secret.yaml
@@ -0,0 +1,8 @@
apiVersion: v1
kind: Secret
metadata:
  name: test-secret
  namespace: app
data:
  somekey: Q2xpZW50SWQ6IDUxOTEwNTY4LTM0YzktNGQ0ZS1iODA1LTNmNTY3NWQyMDdiYwpDbGllbnRTZWNyZXQ6IDZSLThRfkJvSDNNYm1+eGJpaDhmNVZibHBkWGxzeGQyRnp+WXhjWjYKVGVuYW50SWQ6IDcyZjk4OGJmLTg2ZjEtNDFhZi05MWFiLTJkN2NkMDExZGI0NwpTdWJzY3JpcHRpb25JZDogMmIwM2JmYjgtZTg4NS00NTY2LWE2MmEtOTA5YTExZDcxNjkyClJlc291cmNlR3JvdXA6IGNhcmF2ZWwtZGVtbw==
type: Opaque
27 changes: 27 additions & 0 deletions hack/loadtest/manifests/test-service.yaml
@@ -0,0 +1,27 @@
# Copyright 2021 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

apiVersion: v1
kind: Service
metadata:
  name: test-nginx
  namespace: app
  labels:
    run: test-nginx
spec:
  ports:
    - port: 80
      protocol: TCP
  selector:
    run: test-nginx