diff --git a/internal/k8s/cluster/common.go b/internal/k8s/cluster/common.go
new file mode 100644
index 0000000..8b1aaa8
--- /dev/null
+++ b/internal/k8s/cluster/common.go
@@ -0,0 +1,108 @@
+package cluster
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+)
+
+// GetNodesViaKubectl retrieves node information using kubectl.
+// This is a shared utility used by multiple providers to get node status
+// after kubeconfig has been configured.
+func GetNodesViaKubectl(ctx context.Context) ([]NodeInfo, error) {
+	cmd := exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", "json")
+	cmd.Env = os.Environ()
+
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	if err := cmd.Run(); err != nil {
+		return nil, fmt.Errorf("kubectl failed: %w, stderr: %s", err, stderr.String())
+	}
+
+	var nodeList struct {
+		Items []struct {
+			Metadata struct {
+				Name   string            `json:"name"`
+				Labels map[string]string `json:"labels"`
+			} `json:"metadata"`
+			Status struct {
+				Addresses []struct {
+					Type    string `json:"type"`
+					Address string `json:"address"`
+				} `json:"addresses"`
+				Conditions []struct {
+					Type   string `json:"type"`
+					Status string `json:"status"`
+				} `json:"conditions"`
+			} `json:"status"`
+		} `json:"items"`
+	}
+
+	if err := json.Unmarshal(stdout.Bytes(), &nodeList); err != nil {
+		return nil, err
+	}
+
+	nodes := make([]NodeInfo, 0, len(nodeList.Items))
+	for _, item := range nodeList.Items {
+		node := NodeInfo{
+			Name:   item.Metadata.Name,
+			Labels: item.Metadata.Labels,
+			Role:   "worker",
+		}
+
+		// Get addresses
+		for _, addr := range item.Status.Addresses {
+			switch addr.Type {
+			case "InternalIP":
+				node.InternalIP = addr.Address
+			case "ExternalIP":
+				node.ExternalIP = addr.Address
+			}
+		}
+
+		// Get status from conditions
+		for _, cond := range item.Status.Conditions {
+			if cond.Type == "Ready" {
+				if cond.Status == "True" {
+					node.Status = "Ready"
+				} else {
+					node.Status = "NotReady"
+				}
+				break
+			}
+		}
+
+		nodes = append(nodes, node)
+	}
+
+	return nodes, nil
+}
+
+// CountReadyNodes returns the number of ready nodes from a node list
+func CountReadyNodes(nodes []NodeInfo) int {
+	count := 0
+	for _, node := range nodes {
+		if node.Status == "Ready" {
+			count++
+		}
+	}
+	return count
+}
+
+// AllNodesReady checks if all nodes are in Ready state
+func AllNodesReady(nodes []NodeInfo) bool {
+	if len(nodes) == 0 {
+		return false
+	}
+	for _, node := range nodes {
+		if node.Status != "Ready" {
+			return false
+		}
+	}
+	return true
+}
diff --git a/internal/k8s/cluster/common_test.go b/internal/k8s/cluster/common_test.go
new file mode 100644
index 0000000..fc6bea8
--- /dev/null
+++ b/internal/k8s/cluster/common_test.go
@@ -0,0 +1,138 @@
+package cluster
+
+import (
+	"testing"
+)
+
+func TestCountReadyNodes(t *testing.T) {
+	tests := []struct {
+		name     string
+		nodes    []NodeInfo
+		expected int
+	}{
+		{
+			name:     "empty list",
+			nodes:    []NodeInfo{},
+			expected: 0,
+		},
+		{
+			name: "all ready",
+			nodes: []NodeInfo{
+				{Name: "node-1", Status: "Ready"},
+				{Name: "node-2", Status: "Ready"},
+				{Name: "node-3", Status: "Ready"},
+			},
+			expected: 3,
+		},
+		{
+			name: "some ready",
+			nodes: []NodeInfo{
+				{Name: "node-1", Status: "Ready"},
+				{Name: "node-2", Status: "NotReady"},
+				{Name: "node-3", Status: "Ready"},
+			},
+			expected: 2,
+		},
+		{
+			name: "none ready",
+			nodes: []NodeInfo{
+				{Name: "node-1", Status: "NotReady"},
+				{Name: "node-2", Status: "NotReady"},
+			},
+			expected: 0,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := CountReadyNodes(tt.nodes)
+			if result != tt.expected {
+				t.Errorf("CountReadyNodes() = %d, want %d", result, tt.expected)
+			}
+		})
+	}
+}
+
+func TestAllNodesReady(t *testing.T) {
+	tests := []struct {
+		name     string
+		nodes    []NodeInfo
+		expected bool
+	}{
+		{
+			name:     "empty list",
+			nodes:    []NodeInfo{},
+			expected: false,
+		},
+		{
+			name: "all ready",
+			nodes: []NodeInfo{
+				{Name: "node-1", Status: "Ready"},
+				{Name: "node-2", Status: "Ready"},
+			},
+			expected: true,
+		},
+		{
+			name: "one not ready",
+			nodes: []NodeInfo{
+				{Name: "node-1", Status: "Ready"},
+				{Name: "node-2", Status: "NotReady"},
+			},
+			expected: false,
+		},
+		{
+			name: "single ready node",
+			nodes: []NodeInfo{
+				{Name: "node-1", Status: "Ready"},
+			},
+			expected: true,
+		},
+		{
+			name: "single not ready node",
+			nodes: []NodeInfo{
+				{Name: "node-1", Status: "NotReady"},
+			},
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := AllNodesReady(tt.nodes)
+			if result != tt.expected {
+				t.Errorf("AllNodesReady() = %v, want %v", result, tt.expected)
+			}
+		})
+	}
+}
+
+func TestNodeInfoFields(t *testing.T) {
+	node := NodeInfo{
+		Name:       "worker-node-1",
+		Role:       "worker",
+		Status:     "Ready",
+		InternalIP: "10.0.0.5",
+		ExternalIP: "203.0.113.10",
+		InstanceID: "i-1234567890abcdef0",
+		Labels: map[string]string{
+			"node.kubernetes.io/instance-type": "t3.medium",
+			"topology.kubernetes.io/zone":      "us-east-1a",
+		},
+	}
+
+	if node.Name != "worker-node-1" {
+		t.Errorf("expected name 'worker-node-1', got %q", node.Name)
+	}
+	if node.Role != "worker" {
+		t.Errorf("expected role 'worker', got %q", node.Role)
+	}
+	if node.Status != "Ready" {
+		t.Errorf("expected status 'Ready', got %q", node.Status)
+	}
+	if node.InternalIP != "10.0.0.5" {
+		t.Errorf("expected internal IP '10.0.0.5', got %q", node.InternalIP)
+	}
+	if node.Labels["node.kubernetes.io/instance-type"] != "t3.medium" {
+		t.Errorf("expected instance type label 't3.medium', got %q", node.Labels["node.kubernetes.io/instance-type"])
+	}
+}
diff --git a/internal/k8s/cluster/eks.go b/internal/k8s/cluster/eks.go
index 9e90e98..625018d 100644
--- a/internal/k8s/cluster/eks.go
+++ b/internal/k8s/cluster/eks.go
@@ -961,74 +961,7 @@ func (p *EKSProvider) waitForNodeGroupDeleted(ctx context.Context, clusterName,
 }
 
 func (p *EKSProvider) getNodesViaKubectl(ctx context.Context) ([]NodeInfo, error) {
-	cmd := exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", "json")
-	cmd.Env = os.Environ()
-
-	var stdout, stderr bytes.Buffer
-	cmd.Stdout = &stdout
-	cmd.Stderr = &stderr
-
-	if err := cmd.Run(); err != nil {
-		return nil, fmt.Errorf("kubectl failed: %w, stderr: %s", err, stderr.String())
-	}
-
-	var nodeList struct {
-		Items []struct {
-			Metadata struct {
-				Name   string            `json:"name"`
-				Labels map[string]string `json:"labels"`
-			} `json:"metadata"`
-			Status struct {
-				Addresses []struct {
-					Type    string `json:"type"`
-					Address string `json:"address"`
-				} `json:"addresses"`
-				Conditions []struct {
-					Type   string `json:"type"`
-					Status string `json:"status"`
-				} `json:"conditions"`
-			} `json:"status"`
-		} `json:"items"`
-	}
-
-	if err := json.Unmarshal(stdout.Bytes(), &nodeList); err != nil {
-		return nil, err
-	}
-
-	nodes := make([]NodeInfo, 0, len(nodeList.Items))
-	for _, item := range nodeList.Items {
-		node := NodeInfo{
-			Name:   item.Metadata.Name,
-			Labels: item.Metadata.Labels,
-			Role:   "worker",
-		}
-
-		// Get addresses
-		for _, addr := range item.Status.Addresses {
-			switch addr.Type {
-			case "InternalIP":
-				node.InternalIP = addr.Address
-			case "ExternalIP":
-				node.ExternalIP = addr.Address
-			}
-		}
-
-		// Get status
-		for _, cond := range item.Status.Conditions {
-			if cond.Type == "Ready" {
-				if cond.Status == "True" {
-					node.Status = "Ready"
-				} else {
-					node.Status = "NotReady"
-				}
-				break
-			}
-		}
-
-		nodes = append(nodes, node)
-	}
-
-	return nodes, nil
+	return GetNodesViaKubectl(ctx)
 }
 
 func (p *EKSProvider) hasEksctl() bool {
diff --git a/internal/k8s/cluster/gke.go b/internal/k8s/cluster/gke.go
index d1ff056..75be4a3 100644
--- a/internal/k8s/cluster/gke.go
+++ b/internal/k8s/cluster/gke.go
@@ -641,74 +641,7 @@ func (p *GKEProvider) waitForNodePoolRunning(ctx context.Context, clusterName, n
 }
 
 func (p *GKEProvider) getNodesViaKubectl(ctx context.Context) ([]NodeInfo, error) {
-	cmd := exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", "json")
-	cmd.Env = os.Environ()
-
-	var stdout, stderr bytes.Buffer
-	cmd.Stdout = &stdout
-	cmd.Stderr = &stderr
-
-	if err := cmd.Run(); err != nil {
-		return nil, fmt.Errorf("kubectl failed: %w, stderr: %s", err, stderr.String())
-	}
-
-	var nodeList struct {
-		Items []struct {
-			Metadata struct {
-				Name   string            `json:"name"`
-				Labels map[string]string `json:"labels"`
-			} `json:"metadata"`
-			Status struct {
-				Addresses []struct {
-					Type    string `json:"type"`
-					Address string `json:"address"`
-				} `json:"addresses"`
-				Conditions []struct {
-					Type   string `json:"type"`
-					Status string `json:"status"`
-				} `json:"conditions"`
-			} `json:"status"`
-		} `json:"items"`
-	}
-
-	if err := json.Unmarshal(stdout.Bytes(), &nodeList); err != nil {
-		return nil, err
-	}
-
-	nodes := make([]NodeInfo, 0, len(nodeList.Items))
-	for _, item := range nodeList.Items {
-		node := NodeInfo{
-			Name:   item.Metadata.Name,
-			Labels: item.Metadata.Labels,
-			Role:   "worker",
-		}
-
-		// Get addresses
-		for _, addr := range item.Status.Addresses {
-			switch addr.Type {
-			case "InternalIP":
-				node.InternalIP = addr.Address
-			case "ExternalIP":
-				node.ExternalIP = addr.Address
-			}
-		}
-
-		// Get status
-		for _, cond := range item.Status.Conditions {
-			if cond.Type == "Ready" {
-				if cond.Status == "True" {
-					node.Status = "Ready"
-				} else {
-					node.Status = "NotReady"
-				}
-				break
-			}
-		}
-
-		nodes = append(nodes, node)
-	}
-
-	return nodes, nil
+	return GetNodesViaKubectl(ctx)
 }
 
 func (p *GKEProvider) runGcloud(ctx context.Context, project string, args ...string) (string, error) {