From f751ff8ea27bc1267d54c5cd8cc9a5973c049287 Mon Sep 17 00:00:00 2001 From: "andrey.klyuev" Date: Mon, 19 Dec 2022 12:15:04 +0300 Subject: [PATCH 1/2] Change leader election logic --- apis/v1beta1/cluster_types.go | 39 ++- apis/v1beta1/role_types.go | 12 +- apis/v1beta1/zz_generated.deepcopy.go | 15 + config/crd/bases/tarantool.io_clusters.yaml | 4 + config/crd/bases/tarantool.io_roles.yaml | 19 +- controllers/cartridgeconfig_controller.go | 1 + controllers/cluster_controller.go | 2 + controllers/role_controller.go | 1 + pkg/api/failover.go | 12 +- pkg/api/role.go | 2 + pkg/election/election.go | 65 +++- pkg/election/election_test.go | 287 ++++++++++++++++++ pkg/election/suite_test.go | 44 +++ .../steps/cluster/conifgure_failover.go | 74 +++-- .../steps/role/ensure_cartridge_ready.go | 27 +- pkg/topology/common.go | 60 +++- pkg/topology/topology.go | 3 + test/mocks/topology.go | 12 + test/resources/cartridge.go | 9 +- test/resources/cluster.go | 6 + test/resources/pod.go | 64 +++- 21 files changed, 684 insertions(+), 74 deletions(-) create mode 100644 pkg/election/election_test.go create mode 100644 pkg/election/suite_test.go diff --git a/apis/v1beta1/cluster_types.go b/apis/v1beta1/cluster_types.go index e49ab6f9..0272b889 100644 --- a/apis/v1beta1/cluster_types.go +++ b/apis/v1beta1/cluster_types.go @@ -2,7 +2,6 @@ package v1beta1 import ( "github.com/tarantool/tarantool-operator/pkg/api" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -121,7 +120,7 @@ type FailoverEtcd2 struct { // Password for etcd2 connection // +optional - Password corev1.SecretReference `json:"password"` + Password SecretKeyReference `json:"password"` // LockDelay - Timeout (in seconds), determines lock’s time-to-live (default: 10) // +kubebuilder:default=10 @@ -141,8 +140,8 @@ func (in *FailoverEtcd2) GetUsername() string { return in.Username } -func (in *FailoverEtcd2) GetPassword() corev1.SecretReference { - return in.Password +func (in *FailoverEtcd2) 
GetPassword() api.SecretKeyReference { + return &in.Password } func (in *FailoverEtcd2) GetLockDelay() int32 { @@ -162,15 +161,41 @@ type FailoverStateboard struct { // Password for etcd2 connection // +optional - Password corev1.SecretReference `json:"password"` + Password SecretKeyReference `json:"password"` } func (in *FailoverStateboard) GetURI() string { return in.URI } -func (in *FailoverStateboard) GetPassword() corev1.SecretReference { - return in.Password +func (in *FailoverStateboard) GetPassword() api.SecretKeyReference { + return &in.Password +} + +// SecretKeyReference represents a reference to a field in a Secret. It has enough information to retrieve a secret value in any namespace. +// +structType=atomic +type SecretKeyReference struct { + // Namespace defines the space within which the secret name must be unique. + // +optional + Namespace string `json:"namespace"` + // Name is unique within a namespace to reference a secret resource. + // +optional + Name string `json:"name"` + // Key is unique within a Secret to reference a value. + // +optional + Key string `json:"key"` +} + +func (in *SecretKeyReference) GetNamespace() string { + return in.Namespace +} + +func (in *SecretKeyReference) GetName() string { + return in.Name +} + +func (in *SecretKeyReference) GetKey() string { + return in.Key } // ClusterPhase is a label for the condition of a Cluster at the current time. 
diff --git a/apis/v1beta1/role_types.go index 4b3b219b..8aacc106 100644 --- a/apis/v1beta1/role_types.go +++ b/apis/v1beta1/role_types.go @@ -124,6 +124,9 @@ type RoleStatus struct { // Phase of roles // +kubebuilder:default=Pending Phase RolePhase `json:"phase"` + + // ReadyPods is a string in the format "ready_pods_count/total_pods_count" used for the printable column + ReadyPods string `json:"readyPods"` } // Role is the Schema for the roles API @@ -131,10 +134,7 @@ type RoleStatus struct { // +kubebuilder:subresource:status // +kubebuilder:storageversion // +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",priority=0 -// +kubebuilder:printcolumn:name="Replicasets",type="number",JSONPath=".status.replicasets",priority=1 -// +kubebuilder:printcolumn:name="Ready replicasets",type="number",JSONPath=".status.readyReplicasets",priority=1 -// +kubebuilder:printcolumn:name="Replicas",type="number",JSONPath=".status.replicas",priority=1 -// +kubebuilder:printcolumn:name="Ready replicas",type="number",JSONPath=".status.readyReplicas",priority=1 +// +kubebuilder:printcolumn:name="Pods",type="number",JSONPath=".status.readyPods",priority=1 // +kubebuilder:printcolumn:name="Weight",type="number",JSONPath=".status.weight",priority=0 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",priority=0 // +k8s:openapi-gen=true @@ -171,6 +171,10 @@ func (in *Role) ResetStatus() { in.Status = RoleStatus{} } +func (in *Role) SetReadyPodsCount(count int32) { + in.Status.ReadyPods = fmt.Sprintf("%d/%d", count, in.GetReplicasets()*in.GetReplicas()) +} + func (in *Role) SetPhase(phase RolePhase) { in.Status.Phase = phase } diff --git a/apis/v1beta1/zz_generated.deepcopy.go b/apis/v1beta1/zz_generated.deepcopy.go index 9f2292b6..e5de093a 100644 --- a/apis/v1beta1/zz_generated.deepcopy.go +++ b/apis/v1beta1/zz_generated.deepcopy.go @@ -430,3 +430,18 @@ func (in *RoleVShardConfig) DeepCopy() *RoleVShardConfig { 
in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeyReference) DeepCopyInto(out *SecretKeyReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeyReference. +func (in *SecretKeyReference) DeepCopy() *SecretKeyReference { + if in == nil { + return nil + } + out := new(SecretKeyReference) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/tarantool.io_clusters.yaml b/config/crd/bases/tarantool.io_clusters.yaml index 2d9c6052..7ca44768 100644 --- a/config/crd/bases/tarantool.io_clusters.yaml +++ b/config/crd/bases/tarantool.io_clusters.yaml @@ -111,6 +111,8 @@ spec: type: integer password: properties: + key: + type: string name: type: string namespace: @@ -152,6 +154,8 @@ spec: properties: password: properties: + key: + type: string name: type: string namespace: diff --git a/config/crd/bases/tarantool.io_roles.yaml b/config/crd/bases/tarantool.io_roles.yaml index ce522bf1..3f2f35c3 100644 --- a/config/crd/bases/tarantool.io_roles.yaml +++ b/config/crd/bases/tarantool.io_roles.yaml @@ -3459,20 +3459,8 @@ spec: - jsonPath: .status.phase name: Phase type: string - - jsonPath: .status.replicasets - name: Replicasets - priority: 1 - type: number - - jsonPath: .status.readyReplicasets - name: Ready replicasets - priority: 1 - type: number - - jsonPath: .status.replicas - name: Replicas - priority: 1 - type: number - - jsonPath: .status.readyReplicas - name: Ready replicas + - jsonPath: .status.readyPods + name: Pods priority: 1 type: number - jsonPath: .status.weight @@ -6833,8 +6821,11 @@ spec: phase: default: Pending type: string + readyPods: + type: string required: - phase + - readyPods type: object required: - metadata diff --git a/controllers/cartridgeconfig_controller.go b/controllers/cartridgeconfig_controller.go index 8b4e4eff..fceb39cb 100644 --- 
a/controllers/cartridgeconfig_controller.go +++ b/controllers/cartridgeconfig_controller.go @@ -74,6 +74,7 @@ func NewCartridgeConfigReconciler(mgr Manager) *CartridgeConfigReconciler { Client: k8sClient, Recorder: eventsRecorder, ResourcesManager: resourcesManager, + Topology: luaTopology, }, ResourcesManager: resourcesManager, EventsRecorder: eventsRecorder, diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go index ac1c8781..d961ea3f 100644 --- a/controllers/cluster_controller.go +++ b/controllers/cluster_controller.go @@ -89,6 +89,7 @@ func NewClusterReconciler(mgr Manager) *ClusterReconciler { Client: k8sClient, Recorder: eventsRecorder, ResourcesManager: resourcesManager, + Topology: luaTopology, }, ResourcesManager: resourcesManager, EventsRecorder: eventsRecorder, @@ -144,6 +145,7 @@ func (r *ClusterReconciler) SetupWithManager(mgr Manager) error { return NewControllerManagedBy(mgr). For(&Cluster{}). Owns(&v1.Service{}). + Watches(&source.Kind{Type: &v1.Secret{}}, &handler.EnqueueRequestForOwner{OwnerType: &Cluster{}, IsController: false}). 
Watches(&source.Kind{Type: &Role{}}, handler.EnqueueRequestsFromMapFunc(func(obj client.Object) []reconcile.Request { role, ok := obj.(*Role) if !ok { diff --git a/controllers/role_controller.go b/controllers/role_controller.go index fb4e4c51..523be9f9 100644 --- a/controllers/role_controller.go +++ b/controllers/role_controller.go @@ -79,6 +79,7 @@ func NewRoleReconciler(mgr Manager) *RoleReconciler { Client: k8sClient, Recorder: eventsRecorder, ResourcesManager: resourcesManager, + Topology: luaTopology, }, ResourcesManager: resourcesManager, EventsRecorder: eventsRecorder, diff --git a/pkg/api/failover.go b/pkg/api/failover.go index a363461d..76ae533c 100644 --- a/pkg/api/failover.go +++ b/pkg/api/failover.go @@ -1,7 +1,5 @@ package api -import v1 "k8s.io/api/core/v1" - type FailoverMode string const ( @@ -33,12 +31,18 @@ type FailoverConfig interface { type FailoverETCD2Config interface { GetEndpoints() []string GetUsername() string - GetPassword() v1.SecretReference + GetPassword() SecretKeyReference GetLockDelay() int32 GetPrefix() string } type FailoverStateboardConfig interface { GetURI() string - GetPassword() v1.SecretReference + GetPassword() SecretKeyReference +} + +type SecretKeyReference interface { + GetNamespace() string + GetName() string + GetKey() string } diff --git a/pkg/api/role.go b/pkg/api/role.go index ac10ff83..c3cbbe4a 100644 --- a/pkg/api/role.go +++ b/pkg/api/role.go @@ -32,6 +32,8 @@ type Role interface { GetVShardConfig() VShardConfig ResetStatus() + + SetReadyPodsCount(count int32) } type VShardConfig interface { diff --git a/pkg/election/election.go b/pkg/election/election.go index 6a29c2a2..f9f742c3 100644 --- a/pkg/election/election.go +++ b/pkg/election/election.go @@ -7,6 +7,7 @@ import ( "github.com/tarantool/tarantool-operator/pkg/api" "github.com/tarantool/tarantool-operator/pkg/events" "github.com/tarantool/tarantool-operator/pkg/k8s" + "github.com/tarantool/tarantool-operator/pkg/topology" 
"github.com/tarantool/tarantool-operator/pkg/utils" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -17,6 +18,8 @@ type LeaderElection struct { client.Client k8s.ResourcesManager *events.Recorder + + Topology topology.CartridgeTopology } var ( @@ -27,12 +30,29 @@ var ( ) // GetLeaderInstance return first pod of first replicaset of first role. +// It implements the following logic: +// 1. Retrieve previously elected leader +// 1.1 When there is no previously elected leader, it elects a new one +// 1.2 When previously elected leader is not available +// 1.2.1 and the cluster was already bootstrapped, it tries to find a new instance +// 1.2.2 and the cluster was NOT bootstrapped, it fails, because we can't safely change the leader in this case: +// +// the previously elected leader may have important config and topology data, which may be absent on a new leader, +// so an empty instance might be elected, which would produce a split brain. func (r *LeaderElection) GetLeaderInstance(ctx context.Context, cluster api.Cluster) (*v1.Pod, error) { leader, err := r.loadLeaderInstance(ctx, cluster) if err != nil { - if errors.Is(err, ErrLeaderWasNotElected) || errors.Is(err, ErrLeaderNotReady) { + if errors.Is(err, ErrLeaderWasNotElected) { return r.ElectLeaderInstance(ctx, cluster) } + + if errors.Is(err, ErrLeaderNotReady) { + if cluster.IsBootstrapped() { + return r.ElectLeaderInstance(ctx, cluster) + } + + return nil, err + } } return leader, nil @@ -70,7 +90,12 @@ func (r *LeaderElection) loadLeaderInstance(ctx context.Context, cluster api.Clu return nil, err } - if !r.CanBeLeader(cluster, leader) { + canBeLeader, err := r.CanBeLeader(ctx, cluster, leader) + if err != nil { + return nil, err + } + + if !canBeLeader { return nil, ErrLeaderNotReady } @@ -110,9 +135,10 @@ func (r *LeaderElection) findNewLeaderInstance(ctx context.Context, cluster api. 
} var ( - stsName string - podName string - pod *v1.Pod + stsName string + podName string + canBeLeader bool + pod *v1.Pod ) for podOrdinal := int32(0); podOrdinal < maxReplicas; podOrdinal++ { @@ -145,7 +171,12 @@ func (r *LeaderElection) findNewLeaderInstance(ctx context.Context, cluster api. return nil, err } - if r.CanBeLeader(cluster, pod) { + canBeLeader, err = r.CanBeLeader(ctx, cluster, pod) + if err != nil { + return nil, err + } + + if canBeLeader { return pod, nil } } @@ -155,20 +186,30 @@ func (r *LeaderElection) findNewLeaderInstance(ctx context.Context, cluster api. return nil, ErrNoAvailableLeader } -func (r *LeaderElection) CanBeLeader(cluster api.Cluster, pod *v1.Pod) bool { - if pod.GetDeletionTimestamp() != nil { - return false +func (r *LeaderElection) CanBeLeader(ctx context.Context, cluster api.Cluster, pod *v1.Pod) (bool, error) { + if utils.IsPodDeleting(pod) { + return false, nil } if !utils.IsPodRunning(pod) { - return false + return false, nil } if !cluster.IsBootstrapped() { - return utils.IsPodDefaultContainerReady(pod) + started, err := r.Topology.IsCartridgeStarted(ctx, pod) + if err != nil { + return false, err + } + + return started, nil + } + + configured, err := r.Topology.IsCartridgeConfigured(ctx, pod) + if err != nil { + return false, err } - return utils.IsPodReady(pod) + return configured, nil } func (r *LeaderElection) IsLeader(cluster api.Cluster, pod *v1.Pod) bool { diff --git a/pkg/election/election_test.go b/pkg/election/election_test.go new file mode 100644 index 00000000..72a150c6 --- /dev/null +++ b/pkg/election/election_test.go @@ -0,0 +1,287 @@ +package election_test + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "github.com/stretchr/testify/mock" + . "github.com/tarantool/tarantool-operator/internal/implementation" + . 
"github.com/tarantool/tarantool-operator/pkg/election" + "github.com/tarantool/tarantool-operator/pkg/events" + "github.com/tarantool/tarantool-operator/pkg/k8s" + "github.com/tarantool/tarantool-operator/test/mocks" + "github.com/tarantool/tarantool-operator/test/resources" + "github.com/tarantool/tarantool-operator/test/utils" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func newTestElection(fakeClient client.Client, fakeTopologyService *mocks.FakeCartridgeTopology) *LeaderElection { + resourcesManager := &ResourcesManager{ + LabelsManager: labelsManager, + CommonResourcesManager: &k8s.CommonResourcesManager{ + Client: fakeClient, + Scheme: scheme.Scheme, + }, + } + + return &LeaderElection{ + Client: fakeClient, + Recorder: events.NewRecorder(record.NewFakeRecorder(10)), + ResourcesManager: resourcesManager, + Topology: fakeTopologyService, + } +} + +var _ = Describe("election unit testing", func() { + var ( + namespace = "default" + clusterName string + cartridge *resources.FakeCartridge + ) + + BeforeEach(func() { + clusterName = fmt.Sprintf("cluster-%s", utils.RandStringRunes(4)) + cartridge = resources.NewFakeCartridge(labelsManager). + WithNamespace(namespace). 
+ WithClusterName(clusterName) + }) + + Context("Initial election", func() { + It("should not elect foreign pods", func() { + fakeClient := cartridge.NewFakeClientBuilder().WithObjects( + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foreign-pod", + Namespace: namespace, + }, + }, + ).Build() + fakeTopologyService := new(mocks.FakeCartridgeTopology) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeEquivalentTo(ErrNoAvailableLeader)) + Expect(leader).To(BeNil(), "leader should not be elected") + }) + + It("should skip pod which is not running", func() { + cartridge.WithRouterRole(2, 1) + cartridge.WithStorageRole(2, 2) + cartridge.WithStatefulSetsCreated("router") + cartridge.WithStatefulSetsCreated("storage") + cartridge.WithPodsCreated("router") + cartridge.WithPodsCreated("storage") + + fakeClient := cartridge.BuildFakeClient() + fakeTopologyService := new(mocks.FakeCartridgeTopology) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeEquivalentTo(ErrNoAvailableLeader)) + Expect(leader).To(BeNil(), "leader should not be elected") + }) + + It("should skip deleting pod", func() { + cartridge.WithRouterRole(2, 1) + cartridge.WithStorageRole(2, 2) + cartridge.WithStatefulSetsCreated("router") + cartridge.WithStatefulSetsCreated("storage") + cartridge.WithPodsCreated("router") + cartridge.WithPodsCreated("storage") + cartridge.WithAllPodsDeleting() + + fakeClient := cartridge.BuildFakeClient() + fakeTopologyService := new(mocks.FakeCartridgeTopology) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeEquivalentTo(ErrNoAvailableLeader)) + 
Expect(leader).To(BeNil(), "leader should not be elected") + }) + + It("should skip pods where cartridge not started", func() { + cartridge.WithRouterRole(1, 1) + cartridge.WithStorageRole(1, 1) + cartridge.WithStatefulSetsCreated("router") + cartridge.WithStatefulSetsCreated("storage") + cartridge.WithPodsCreated("router") + cartridge.WithPodsCreated("storage") + cartridge.WithAllPodsRunning() + + fakeClient := cartridge.BuildFakeClient() + fakeTopologyService := new(mocks.FakeCartridgeTopology) + fakeTopologyService. + On("IsCartridgeStarted", mock.Anything, mock.Anything). + Return(false, nil) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).To(HaveOccurred()) + Expect(err).To(BeEquivalentTo(ErrNoAvailableLeader)) + Expect(leader).To(BeNil(), "leader should not be elected") + }) + + It("should elected any non-foreign running pod", func() { + cartridge.WithRouterRole(1, 1) + cartridge.WithStorageRole(1, 1) + cartridge.WithStatefulSetsCreated("router") + cartridge.WithStatefulSetsCreated("storage") + cartridge.WithPodsCreated("router") + cartridge.WithPodsCreated("storage") + cartridge.WithAllPodsRunning() + + fakeClient := cartridge.NewFakeClientBuilder().WithObjects( + &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foreign-pod", + Namespace: namespace, + }, + }, + ).Build() + + fakeTopologyService := new(mocks.FakeCartridgeTopology) + fakeTopologyService. + On("IsCartridgeStarted", mock.Anything, mock.Anything). 
+ Return(true, nil) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).NotTo(HaveOccurred()) + Expect(leader).NotTo(BeNil(), "leader should be elected") + Expect(leader.GetName()).NotTo(BeEquivalentTo("foreign-pod"), "should not elect foreign pods") + }) + }) + + Context("Re-election on bootstrapped cluster", func() { + It("should not change leader if it's alive, and has config", func() { + cartridge.WithRouterRole(1, 1) + cartridge.WithStorageRole(1, 1) + cartridge.WithStatefulSetsCreated("router") + cartridge.WithStatefulSetsCreated("storage") + cartridge.WithPodsCreated("router") + cartridge.WithPodsCreated("storage") + cartridge.WithAllPodsRunning() + cartridge.WithLeader("router-0-0") + cartridge.Bootstrapped() + + fakeClient := cartridge.BuildFakeClient() + fakeTopologyService := new(mocks.FakeCartridgeTopology) + fakeTopologyService. + On("IsCartridgeConfigured", mock.Anything, mock.Anything). + Return(true, nil) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).NotTo(HaveOccurred()) + Expect(leader).NotTo(BeNil(), "leader should be elected") + Expect(leader.GetName()).To(BeEquivalentTo("router-0-0"), "leader should not be changed") + }) + + Describe("should change leader if it's dead", func() { + It("should skip pod where no config present", func() { + cartridge.WithRouterRole(1, 1) + cartridge.WithStorageRole(2, 1) + cartridge.WithStatefulSetsCreated("router") + cartridge.WithStatefulSetsCreated("storage") + cartridge.WithPodsCreated("router") + cartridge.WithPodsCreated("storage") + cartridge.WithPodsRunning("storage-0-0", "storage-1-0") + cartridge.WithLeader("router-0-0") + cartridge.Bootstrapped() + + fakeClient := cartridge.BuildFakeClient() + fakeTopologyService := new(mocks.FakeCartridgeTopology) + fakeTopologyService. 
+ On("IsCartridgeConfigured", mock.Anything, mock.MatchedBy(func(pod *v1.Pod) bool { + return pod.GetName() == "storage-0-0" + })). + Return(false, nil) + + fakeTopologyService. + On("IsCartridgeConfigured", mock.Anything, mock.MatchedBy(func(pod *v1.Pod) bool { + return pod.GetName() == "storage-1-0" + })). + Return(true, nil) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).NotTo(HaveOccurred()) + Expect(leader).NotTo(BeNil(), "leader should be changed") + Expect(leader.GetName()).To(BeEquivalentTo("storage-1-0"), "new leader should be configured pod") + }) + }) + }) + + Context("Re-election on not bootstrapped cluster", func() { + It("should elect same leader if it's running", func() { + cartridge.WithRouterRole(1, 1) + cartridge.WithStorageRole(2, 1) + cartridge.WithStatefulSetsCreated("router") + cartridge.WithStatefulSetsCreated("storage") + cartridge.WithPodsCreated("router") + cartridge.WithPodsCreated("storage") + cartridge.WithAllPodsRunning() + cartridge.WithLeader("router-0-0") + + fakeClient := cartridge.BuildFakeClient() + fakeTopologyService := new(mocks.FakeCartridgeTopology) + fakeTopologyService. + On("IsCartridgeStarted", mock.Anything, mock.Anything). 
+ Return(true, nil) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).NotTo(HaveOccurred()) + Expect(leader).NotTo(BeNil(), "leader should be elected") + Expect(leader.GetName()).To(BeEquivalentTo("router-0-0"), "leader should not be changed") + }) + + It("should error if leader is not running", func() { + cartridge.WithRouterRole(1, 1) + cartridge.WithStorageRole(2, 1) + cartridge.WithStatefulSetsCreated("router") + cartridge.WithStatefulSetsCreated("storage") + cartridge.WithPodsCreated("router") + cartridge.WithPodsCreated("storage") + cartridge.WithPodsRunning("storage-0-0", "storage-1-0") + cartridge.WithLeader("router-0-0") + + fakeClient := cartridge.BuildFakeClient() + fakeTopologyService := new(mocks.FakeCartridgeTopology) + fakeTopologyService. + On("IsCartridgeStarted", mock.Anything, mock.MatchedBy(func(pod *v1.Pod) bool { + return pod.GetName() == "router-0-0" + })). + Return(false, nil) + + fakeTopologyService. + On("IsCartridgeStarted", mock.Anything, mock.MatchedBy(func(pod *v1.Pod) bool { + return pod.GetName() != "router-0-0" + })). + Return(true, nil) + + election := newTestElection(fakeClient, fakeTopologyService) + + leader, err := election.GetLeaderInstance(ctx, cartridge.Cluster) + Expect(err).To(HaveOccurred()) + Expect(leader).To(BeNil(), "leader should not be elected") + Expect(err).To(BeEquivalentTo(ErrLeaderNotReady)) + }) + }) +}) diff --git a/pkg/election/suite_test.go b/pkg/election/suite_test.go new file mode 100644 index 00000000..67cb4975 --- /dev/null +++ b/pkg/election/suite_test.go @@ -0,0 +1,44 @@ +package election_test + +import ( + "context" + "testing" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "github.com/tarantool/tarantool-operator/apis/v1beta1" + "github.com/tarantool/tarantool-operator/pkg/k8s" + "k8s.io/client-go/kubernetes/scheme" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +var ( + ctx = context.Background() + cancel context.CancelFunc + labelsManager = &k8s.NamespacedLabelsManager{ + Namespace: "tarantool.io", + } +) + +func TestElection(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controllers Suite") +} + +var _ = BeforeSuite(func() { + var err error + + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + ctx, cancel = context.WithCancel(context.Background()) + + err = scheme.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = v1beta1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) +}, 60) + +var _ = AfterSuite(func() { + cancel() +}) diff --git a/pkg/reconciliation/steps/cluster/conifgure_failover.go b/pkg/reconciliation/steps/cluster/conifgure_failover.go index 0888603d..35a39011 100644 --- a/pkg/reconciliation/steps/cluster/conifgure_failover.go +++ b/pkg/reconciliation/steps/cluster/conifgure_failover.go @@ -2,9 +2,12 @@ package cluster import ( "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" "github.com/tarantool/tarantool-operator/pkg/api" . 
"github.com/tarantool/tarantool-operator/pkg/reconciliation" "github.com/tarantool/tarantool-operator/pkg/topology" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) type ConfigureFailoverStep[ClusterType api.Cluster, CtxType ClusterContext[ClusterType], CtrlType ClusterController] struct{} @@ -65,59 +68,82 @@ func (r *ConfigureFailoverStep[ClusterType, CtxType, CtrlType]) LoadFailoverPara } if config.GetMode() == api.FailoverModeStateful { + var ( + secret *corev1.Secret + password []byte + err error + ok bool + ) + switch config.GetStateProvider() { case api.FailoverStateProviderETCD2: - var ( - etcd2Password string - err error - ) - etcd2config := config.GetETCD2Config() etcd2PasswordRef := etcd2config.GetPassword() - if etcd2PasswordRef.Name != "" { - etcd2Password, err = ctrl.GetResourcesManager().GetSecretValue( - ctx, - etcd2PasswordRef.Namespace, - etcd2PasswordRef.Name, - "etcd2-password", // fixme: make not hardcoded - ) + if etcd2PasswordRef.GetName() != "" { + secret, err = ctrl.GetResourcesManager().GetSecret(ctx, etcd2PasswordRef.GetNamespace(), etcd2PasswordRef.GetName()) if err != nil { return nil, err } + + etcd2secretKey := etcd2PasswordRef.GetKey() + if etcd2secretKey == "" { + etcd2secretKey = "etcd2-password" + } + + password, ok = secret.Data[etcd2secretKey] + if !ok { + return nil, errors.New("no such key") + } } params.Etcd2Params = &topology.Etcd2Params{ Endpoints: etcd2config.GetEndpoints(), Username: etcd2config.GetUsername(), - Password: etcd2Password, + Password: string(password), LockDelay: etcd2config.GetLockDelay(), Prefix: etcd2config.GetPrefix(), } case api.FailoverStateProviderStateboard: - var ( - stateboardPassword string - err error - ) - stateboardConfig := config.GetStateboardConfig() stateboardPasswordRef := stateboardConfig.GetPassword() - if stateboardPasswordRef.Name != "" { - stateboardPassword, err = ctrl.GetResourcesManager().GetSecretValue( + if 
stateboardPasswordRef.GetName() != "" { + secret, err = ctrl.GetResourcesManager().GetSecret( ctx, - stateboardPasswordRef.Namespace, - stateboardPasswordRef.Name, - "stateboard-password", // fixme: make not hardcoded + stateboardPasswordRef.GetNamespace(), + stateboardPasswordRef.GetName(), ) if err != nil { return nil, err } + + stateboardSecretKey := stateboardPasswordRef.GetKey() + if stateboardSecretKey == "" { + stateboardSecretKey = "stateboard-password" + } + + password, ok = secret.Data[stateboardSecretKey] + if !ok { + return nil, errors.New("no such key") + } } params.StateboardParams = &topology.StateboardParams{ URI: stateboardConfig.GetURI(), - Password: stateboardPassword, + Password: string(password), + } + } + + if secret != nil { + err = controllerutil.SetOwnerReference(ctx.GetCluster(), secret, ctrl.GetScheme()) + if err != nil { + return nil, err + } + + err = ctrl.Update(ctx, secret) + if err != nil { + return nil, err } } } diff --git a/pkg/reconciliation/steps/role/ensure_cartridge_ready.go b/pkg/reconciliation/steps/role/ensure_cartridge_ready.go index 4cdc0e77..0b16f745 100644 --- a/pkg/reconciliation/steps/role/ensure_cartridge_ready.go +++ b/pkg/reconciliation/steps/role/ensure_cartridge_ready.go @@ -1,6 +1,8 @@ package role import ( + "time" + "github.com/tarantool/tarantool-operator/pkg/api" . "github.com/tarantool/tarantool-operator/pkg/reconciliation" "github.com/tarantool/tarantool-operator/pkg/utils" @@ -14,10 +16,14 @@ func (r *EnsureCartridgeReadyStep[RoleType, CtxType, CtrlType]) GetName() string func (r *EnsureCartridgeReadyStep[RoleType, CtxType, CtrlType]) Reconcile(ctx CtxType, ctrl CtrlType) (*Result, error) { allPodsRunning, err := r.IsCartridgeReady(ctx, ctrl) - if err != nil || !allPodsRunning { + if err != nil { + return Error(err) + } + + if !allPodsRunning { ctx.GetLogger().Info("Not all pods of role are running. 
Wait for it.") - return Error(err) + return Requeue(time.Second * 10) } return NextStep() @@ -42,16 +48,29 @@ func (r *EnsureCartridgeReadyStep[RoleType, CtxType, CtrlType]) IsCartridgeReady readyCount := int32(0) for key := range pods.Items { - if utils.IsPodDeleting(&pods.Items[key]) { + pod := &pods.Items[key] + + if utils.IsPodDeleting(pod) { continue } - if !utils.IsPodDefaultContainerReady(&pods.Items[key]) { + if !utils.IsPodRunning(pod) { + continue + } + + started, err := ctrl.GetTopology().IsCartridgeStarted(ctx, pod) + if err != nil { + return false, err + } + + if !started { continue } readyCount++ } + role.SetReadyPodsCount(readyCount) + return readyCount >= expectedCount, nil } diff --git a/pkg/topology/common.go b/pkg/topology/common.go index f2ae9a19..94e46452 100644 --- a/pkg/topology/common.go +++ b/pkg/topology/common.go @@ -379,7 +379,7 @@ func (r *CommonCartridgeTopology) ApplyCartridgeConfig(ctx context.Context, lead end return cartridge.config_patch_clusterwide(safeConfig) -` + ` var res bool @@ -394,3 +394,61 @@ func (r *CommonCartridgeTopology) ApplyCartridgeConfig(ctx context.Context, lead return nil } + +func (r *CommonCartridgeTopology) IsCartridgeStarted(ctx context.Context, pod *v1.Pod) (bool, error) { + // language=lua + lua := ` + local confapplier = require('cartridge.confapplier') + local state = confapplier.get_state() + + if state == '' then + return { res = false, err=nil } + end + + if state == 'InitError' or state == 'BootError' or state == 'OperationError' or state == 'ReloadError' then + return { res = false, err=nil } + end + + return { res = true, err=nil } + ` + + var res *BooleanResult + + err := r.Exec(ctx, pod, &res, lua) + if err != nil { + return false, errors.Wrap(err, "unable to retrieve instance state") + } + + if res.Err != nil { + return res.Res, errors.Wrap(res.Err, "unable to retrieve instance state") + } + + return res.Res, nil +} + +func (r *CommonCartridgeTopology) IsCartridgeConfigured(ctx context.Context, 
pod *v1.Pod) (bool, error) { + // language=lua + lua := ` + local confapplier = require('cartridge.confapplier') + local state = confapplier.get_state() + + if state ~= 'RolesConfigured' then + return { res = false, err=nil } + end + + return { res = true, err=nil } + ` + + var res *BooleanResult + + err := r.Exec(ctx, pod, &res, lua) + if err != nil { + return false, errors.Wrap(err, "unable to retrieve instance state") + } + + if res.Err != nil { + return res.Res, errors.Wrap(res.Err, "unable to retrieve instance state") + } + + return res.Res, nil +} diff --git a/pkg/topology/topology.go b/pkg/topology/topology.go index 1ff125f2..a5e09638 100644 --- a/pkg/topology/topology.go +++ b/pkg/topology/topology.go @@ -37,4 +37,7 @@ type CartridgeTopology interface { GetCartridgeConfig(ctx context.Context, leader *v1.Pod) (CartridgeConfigData, error) ApplyCartridgeConfig(ctx context.Context, leader *v1.Pod, config CartridgeConfigData) error + + IsCartridgeStarted(ctx context.Context, pod *v1.Pod) (bool, error) + IsCartridgeConfigured(ctx context.Context, pod *v1.Pod) (bool, error) } diff --git a/test/mocks/topology.go b/test/mocks/topology.go index df976286..28a75c75 100644 --- a/test/mocks/topology.go +++ b/test/mocks/topology.go @@ -105,3 +105,15 @@ func (f *FakeCartridgeTopology) ApplyCartridgeConfig(ctx context.Context, leader return args.Error(0) } + +func (f *FakeCartridgeTopology) IsCartridgeStarted(ctx context.Context, pod *v1.Pod) (bool, error) { + args := f.Called(ctx, pod) + + return args.Bool(0), args.Error(1) +} + +func (f *FakeCartridgeTopology) IsCartridgeConfigured(ctx context.Context, pod *v1.Pod) (bool, error) { + args := f.Called(ctx, pod) + + return args.Bool(0), args.Error(1) +} diff --git a/test/resources/cartridge.go b/test/resources/cartridge.go index 562857ff..97b3c978 100644 --- a/test/resources/cartridge.go +++ b/test/resources/cartridge.go @@ -58,10 +58,13 @@ func (r *FakeCartridge) WithNamespace(namespace string) *FakeCartridge { return r } 
-func (r *FakeCartridge) BuildFakeClient() client.WithWatch { +func (r *FakeCartridge) NewFakeClientBuilder() *fake.ClientBuilder { fakeClientBuilder := fake.NewClientBuilder() return fakeClientBuilder. - WithObjects(r.objects...). - Build() + WithObjects(r.objects...) +} + +func (r *FakeCartridge) BuildFakeClient() client.WithWatch { + return r.NewFakeClientBuilder().Build() } diff --git a/test/resources/cluster.go b/test/resources/cluster.go index edc47a56..5b85e4e0 100644 --- a/test/resources/cluster.go +++ b/test/resources/cluster.go @@ -11,3 +11,9 @@ func (r *FakeCartridge) Bootstrapped() *FakeCartridge { return r } + +func (r *FakeCartridge) WithLeader(leaderName string) *FakeCartridge { + r.Cluster.Status.Leader = leaderName + + return r +} diff --git a/test/resources/pod.go b/test/resources/pod.go index 5e381cf3..232d4bb4 100644 --- a/test/resources/pod.go +++ b/test/resources/pod.go @@ -51,6 +51,15 @@ func (r *FakeCartridge) WithStoragePodsCreated() *FakeCartridge { return r.WithPodsCreated(RoleStorage) } +func (r *FakeCartridge) WithAllPodsRunning() *FakeCartridge { + for _, pod := range r.Pods { + r.setPodRunning(pod) + r.setPodContainerReady(pod, PodContainerName) + } + + return r +} + func (r *FakeCartridge) WithAllPodsReady() *FakeCartridge { for _, pod := range r.Pods { r.setPodReady(pod) @@ -60,12 +69,59 @@ func (r *FakeCartridge) WithAllPodsReady() *FakeCartridge { return r } +func (r *FakeCartridge) WithPodsRunning(names ...string) *FakeCartridge { + namesMap := make(map[string]bool, len(names)) + for _, name := range names { + namesMap[name] = true + } + + for _, pod := range r.Pods { + _, ok := namesMap[pod.GetName()] + if ok { + r.setPodRunning(pod) + r.setPodContainerReady(pod, PodContainerName) + } + } + + return r +} + +func (r *FakeCartridge) WithPodsReady(names ...string) *FakeCartridge { + namesMap := make(map[string]bool, len(names)) + for _, name := range names { + namesMap[name] = true + } + + for _, pod := range r.Pods { + _, ok := 
namesMap[pod.GetName()] + if ok { + r.setPodRunning(pod) + r.setPodReady(pod) + r.setPodContainerReady(pod, PodContainerName) + } + } + + return r +} + +func (r *FakeCartridge) WithAllPodsDeleting() *FakeCartridge { + for _, pod := range r.Pods { + r.setPodDeleting(pod) + r.setPodContainerReady(pod, PodContainerName) + } + + return r +} + +func (r *FakeCartridge) setPodRunning(pod *v1.Pod) { + pod.Status.Phase = v1.PodRunning +} + func (r *FakeCartridge) setPodReady(pod *v1.Pod) { if pod.Status.Conditions == nil { pod.Status.Conditions = []v1.PodCondition{} } - pod.Status.Phase = v1.PodRunning pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{ Type: v1.PodReady, Status: v1.ConditionTrue, @@ -74,6 +130,12 @@ func (r *FakeCartridge) setPodReady(pod *v1.Pod) { }) } +func (r *FakeCartridge) setPodDeleting(pod *v1.Pod) { + now := metav1.Now() + pod.DeletionTimestamp = &now +} + +//nolint:unparam func (r *FakeCartridge) setPodContainerReady(pod *v1.Pod, containerName string) { if pod.Status.ContainerStatuses == nil { pod.Status.ContainerStatuses = []v1.ContainerStatus{} From 235610597cc0e4b3316022ab613f78d7de88d950 Mon Sep 17 00:00:00 2001 From: "andrey.klyuev" Date: Mon, 19 Dec 2022 12:42:12 +0300 Subject: [PATCH 2/2] Fix tests --- .golangci.yml | 1 + CHANGELOG.md | 4 ++ apis/v1beta1/cluster_types.go | 4 +- controllers/cluster_controller_test.go | 10 +++- pkg/election/election_test.go | 64 +++++++++++++------------- 5 files changed, 48 insertions(+), 35 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 82aba833..a00d0528 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -46,6 +46,7 @@ linters: - tagliatelle - varnamelen - nilnil + - godot issues: exclude-rules: diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b71b087..52f01ae2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,10 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
 
+## [Unreleased]
+- Add ability to specify key in failover password secret
+- Improve leader election logic
+
 ## [1.0.0-rc1]
 
 ### Added
diff --git a/apis/v1beta1/cluster_types.go b/apis/v1beta1/cluster_types.go
index 0272b889..5e990de8 100644
--- a/apis/v1beta1/cluster_types.go
+++ b/apis/v1beta1/cluster_types.go
@@ -172,8 +172,8 @@ func (in *FailoverStateboard) GetPassword() api.SecretKeyReference {
 	return &in.Password
 }
 
-// SecretKeyReference represents a reference to filed in Secret. It has enough information to retrieve a value secret in any namespace
-// +structType=atomic.
+// SecretKeyReference represents a reference to a field in a Secret. It has enough information to retrieve a secret value in any namespace.
+// +structType=atomic
 type SecretKeyReference struct {
 	// Namespace defines the space within which the secret name must be unique.
 	// +optional
diff --git a/controllers/cluster_controller_test.go b/controllers/cluster_controller_test.go
index 1188d0e1..8de54590 100644
--- a/controllers/cluster_controller_test.go
+++ b/controllers/cluster_controller_test.go
@@ -70,6 +70,7 @@ var _ = Describe("cluster_controller unit testing", func() {
 				Client:           fakeClient,
 				Recorder:         eventsRecorder,
 				ResourcesManager: resourcesManager,
+				Topology:         fakeTopologyService,
 			},
 			ResourcesManager: resourcesManager,
 			LabelsManager:    labelsManager,
@@ -122,13 +123,17 @@ var _ = Describe("cluster_controller unit testing", func() {
 		It("Must bootstrap cluster if all roles ready", func() {
 			cartridge.
 				WithAllRolesInPhase(v1beta1.RoleWaitingForBootstrap).
-				WithAllPodsReady()
+				WithAllPodsRunning()
 
 			fakeTopologyService.
 				On("BootstrapVshard", mock.Anything, mock.Anything).
 				Return(nil).
 				Once()
 
+			fakeTopologyService.
+				On("IsCartridgeStarted", mock.Anything, mock.Anything).
+ Return(true, nil) + fakeClient := cartridge.BuildFakeClient() resourcesManager := &ResourcesManager{ @@ -158,6 +163,7 @@ var _ = Describe("cluster_controller unit testing", func() { Client: fakeClient, Recorder: eventsRecorder, ResourcesManager: resourcesManager, + Topology: fakeTopologyService, }, ResourcesManager: resourcesManager, LabelsManager: labelsManager, @@ -208,6 +214,7 @@ var _ = Describe("cluster_controller unit testing", func() { Client: fakeClient, Recorder: eventsRecorder, ResourcesManager: resourcesManager, + Topology: fakeTopologyService, }, ResourcesManager: resourcesManager, LabelsManager: labelsManager, @@ -260,6 +267,7 @@ var _ = Describe("cluster_controller unit testing", func() { Client: fakeClient, Recorder: eventsRecorder, ResourcesManager: resourcesManager, + Topology: fakeTopologyService, }, ResourcesManager: resourcesManager, LabelsManager: labelsManager, diff --git a/pkg/election/election_test.go b/pkg/election/election_test.go index 72a150c6..d76f02f8 100644 --- a/pkg/election/election_test.go +++ b/pkg/election/election_test.go @@ -74,10 +74,10 @@ var _ = Describe("election unit testing", func() { It("should skip pod which is not running", func() { cartridge.WithRouterRole(2, 1) cartridge.WithStorageRole(2, 2) - cartridge.WithStatefulSetsCreated("router") - cartridge.WithStatefulSetsCreated("storage") - cartridge.WithPodsCreated("router") - cartridge.WithPodsCreated("storage") + cartridge.WithRouterStatefulSetsCreated() + cartridge.WithStorageStatefulSetsCreated() + cartridge.WithRouterPodsCreated() + cartridge.WithStoragePodsCreated() fakeClient := cartridge.BuildFakeClient() fakeTopologyService := new(mocks.FakeCartridgeTopology) @@ -93,10 +93,10 @@ var _ = Describe("election unit testing", func() { It("should skip deleting pod", func() { cartridge.WithRouterRole(2, 1) cartridge.WithStorageRole(2, 2) - cartridge.WithStatefulSetsCreated("router") - cartridge.WithStatefulSetsCreated("storage") - cartridge.WithPodsCreated("router") - 
cartridge.WithPodsCreated("storage") + cartridge.WithRouterStatefulSetsCreated() + cartridge.WithStorageStatefulSetsCreated() + cartridge.WithRouterPodsCreated() + cartridge.WithStoragePodsCreated() cartridge.WithAllPodsDeleting() fakeClient := cartridge.BuildFakeClient() @@ -113,10 +113,10 @@ var _ = Describe("election unit testing", func() { It("should skip pods where cartridge not started", func() { cartridge.WithRouterRole(1, 1) cartridge.WithStorageRole(1, 1) - cartridge.WithStatefulSetsCreated("router") - cartridge.WithStatefulSetsCreated("storage") - cartridge.WithPodsCreated("router") - cartridge.WithPodsCreated("storage") + cartridge.WithRouterStatefulSetsCreated() + cartridge.WithStorageStatefulSetsCreated() + cartridge.WithRouterPodsCreated() + cartridge.WithStoragePodsCreated() cartridge.WithAllPodsRunning() fakeClient := cartridge.BuildFakeClient() @@ -136,10 +136,10 @@ var _ = Describe("election unit testing", func() { It("should elected any non-foreign running pod", func() { cartridge.WithRouterRole(1, 1) cartridge.WithStorageRole(1, 1) - cartridge.WithStatefulSetsCreated("router") - cartridge.WithStatefulSetsCreated("storage") - cartridge.WithPodsCreated("router") - cartridge.WithPodsCreated("storage") + cartridge.WithRouterStatefulSetsCreated() + cartridge.WithStorageStatefulSetsCreated() + cartridge.WithRouterPodsCreated() + cartridge.WithStoragePodsCreated() cartridge.WithAllPodsRunning() fakeClient := cartridge.NewFakeClientBuilder().WithObjects( @@ -169,10 +169,10 @@ var _ = Describe("election unit testing", func() { It("should not change leader if it's alive, and has config", func() { cartridge.WithRouterRole(1, 1) cartridge.WithStorageRole(1, 1) - cartridge.WithStatefulSetsCreated("router") - cartridge.WithStatefulSetsCreated("storage") - cartridge.WithPodsCreated("router") - cartridge.WithPodsCreated("storage") + cartridge.WithRouterStatefulSetsCreated() + cartridge.WithStorageStatefulSetsCreated() + cartridge.WithRouterPodsCreated() + 
cartridge.WithStoragePodsCreated() cartridge.WithAllPodsRunning() cartridge.WithLeader("router-0-0") cartridge.Bootstrapped() @@ -195,10 +195,10 @@ var _ = Describe("election unit testing", func() { It("should skip pod where no config present", func() { cartridge.WithRouterRole(1, 1) cartridge.WithStorageRole(2, 1) - cartridge.WithStatefulSetsCreated("router") - cartridge.WithStatefulSetsCreated("storage") - cartridge.WithPodsCreated("router") - cartridge.WithPodsCreated("storage") + cartridge.WithRouterStatefulSetsCreated() + cartridge.WithStorageStatefulSetsCreated() + cartridge.WithRouterPodsCreated() + cartridge.WithStoragePodsCreated() cartridge.WithPodsRunning("storage-0-0", "storage-1-0") cartridge.WithLeader("router-0-0") cartridge.Bootstrapped() @@ -231,10 +231,10 @@ var _ = Describe("election unit testing", func() { It("should elect same leader if it's running", func() { cartridge.WithRouterRole(1, 1) cartridge.WithStorageRole(2, 1) - cartridge.WithStatefulSetsCreated("router") - cartridge.WithStatefulSetsCreated("storage") - cartridge.WithPodsCreated("router") - cartridge.WithPodsCreated("storage") + cartridge.WithRouterStatefulSetsCreated() + cartridge.WithStorageStatefulSetsCreated() + cartridge.WithRouterPodsCreated() + cartridge.WithStoragePodsCreated() cartridge.WithAllPodsRunning() cartridge.WithLeader("router-0-0") @@ -255,10 +255,10 @@ var _ = Describe("election unit testing", func() { It("should error if leader is not running", func() { cartridge.WithRouterRole(1, 1) cartridge.WithStorageRole(2, 1) - cartridge.WithStatefulSetsCreated("router") - cartridge.WithStatefulSetsCreated("storage") - cartridge.WithPodsCreated("router") - cartridge.WithPodsCreated("storage") + cartridge.WithRouterStatefulSetsCreated() + cartridge.WithStorageStatefulSetsCreated() + cartridge.WithRouterPodsCreated() + cartridge.WithStoragePodsCreated() cartridge.WithPodsRunning("storage-0-0", "storage-1-0") cartridge.WithLeader("router-0-0")