94 changes: 52 additions & 42 deletions pkg/provisioner/provisioner.go
@@ -37,16 +37,16 @@ type Provisioner struct {
KubernetesManager kubernetes.KubernetesManager
KubernetesClient k8sclient.KubernetesClient
ClusterClient cluster.ClusterClient
blueprintHandler blueprint.BlueprintHandler
}

// =============================================================================
// Constructor
// =============================================================================

// NewProvisioner creates a new Provisioner instance with the provided runtime and blueprint handler.
// It sets up all required provisioner handlers—terraform stack, kubernetes manager, kubernetes client,
// and cluster client. The cluster client is created based on the cluster driver configuration (talos/omni).
// Components are initialized lazily when needed by the Up() and Down() methods.
// It sets up kubernetes manager and kubernetes client. Terraform stack and cluster client
// are initialized lazily when needed by the Up(), Down(), and WaitForHealth() methods.
// Returns a pointer to the Provisioner struct.
func NewProvisioner(rt *runtime.Runtime, blueprintHandler blueprint.BlueprintHandler, opts ...*Provisioner) *Provisioner {
provisioner := &Provisioner{
@@ -77,19 +77,6 @@ func NewProvisioner(rt *runtime.Runtime, blueprintHandler blueprint.BlueprintHan
provisioner.KubernetesManager = kubernetes.NewKubernetesManager(provisioner.KubernetesClient)
}

if provisioner.TerraformStack == nil {
if rt.ConfigHandler != nil && rt.ConfigHandler.GetBool("terraform.enabled", false) {
provisioner.TerraformStack = terraforminfra.NewWindsorStack(rt, blueprintHandler)
}
}

if provisioner.ClusterClient == nil {
clusterDriver := rt.ConfigHandler.GetString("cluster.driver", "")
if clusterDriver == "talos" || clusterDriver == "omni" {
provisioner.ClusterClient = cluster.NewTalosClusterClient()
}
}

return provisioner
}

@@ -107,7 +94,16 @@ func (i *Provisioner) Up(blueprint *blueprintv1alpha1.Blueprint) error {
}

if i.TerraformStack == nil {
return nil
if i.Runtime != nil && i.Runtime.ConfigHandler != nil {
terraformEnabled := i.Runtime.ConfigHandler.GetBool("terraform.enabled", false)
if terraformEnabled {
i.TerraformStack = terraforminfra.NewStack(i.Runtime)
} else {
return nil
}
} else {
return nil
}
}
if err := i.TerraformStack.Up(blueprint); err != nil {
return fmt.Errorf("failed to run terraform up: %w", err)
@@ -229,37 +225,51 @@ func (i *Provisioner) CheckNodeHealth(ctx context.Context, options NodeHealthChe
return fmt.Errorf("no health checks specified. Use --nodes and/or --k8s-endpoint flags to specify health checks to perform")
}

if hasNodeCheck && i.ClusterClient == nil && !hasK8sCheck {
return fmt.Errorf("no health checks specified. Use --nodes and/or --k8s-endpoint flags to specify health checks to perform")
}

if hasNodeCheck && i.ClusterClient != nil {
defer i.ClusterClient.Close()
if hasNodeCheck {
if i.ClusterClient == nil {
if i.Runtime != nil && i.Runtime.ConfigHandler != nil {
clusterDriver := i.Runtime.ConfigHandler.GetString("cluster.driver", "")
if clusterDriver == "talos" || clusterDriver == "omni" {
i.ClusterClient = cluster.NewTalosClusterClient()
}
}
}

var checkCtx context.Context
var cancel context.CancelFunc
if options.Timeout > 0 {
checkCtx, cancel = context.WithTimeout(ctx, options.Timeout)
} else {
checkCtx, cancel = context.WithCancel(ctx)
if i.ClusterClient == nil {
if !hasK8sCheck {
return fmt.Errorf("no health checks specified. Use --nodes and/or --k8s-endpoint flags to specify health checks to perform")
}
// If we have k8s check, we can continue without cluster client
}
defer cancel()

if err := i.ClusterClient.WaitForNodesHealthy(checkCtx, options.Nodes, options.Version); err != nil {
if hasK8sCheck {
if outputFunc != nil {
outputFunc(fmt.Sprintf("Warning: Cluster client failed (%v), continuing with Kubernetes checks\n", err))
}
if i.ClusterClient != nil {
defer i.ClusterClient.Close()

var checkCtx context.Context
var cancel context.CancelFunc
if options.Timeout > 0 {
checkCtx, cancel = context.WithTimeout(ctx, options.Timeout)
} else {
return fmt.Errorf("nodes failed health check: %w", err)
checkCtx, cancel = context.WithCancel(ctx)
}
} else {
if outputFunc != nil {
message := fmt.Sprintf("All %d nodes are healthy", len(options.Nodes))
if options.Version != "" {
message += fmt.Sprintf(" and running version %s", options.Version)
defer cancel()

if err := i.ClusterClient.WaitForNodesHealthy(checkCtx, options.Nodes, options.Version); err != nil {
if hasK8sCheck {
if outputFunc != nil {
outputFunc(fmt.Sprintf("Warning: Cluster client failed (%v), continuing with Kubernetes checks\n", err))
}
} else {
return fmt.Errorf("nodes failed health check: %w", err)
}
} else {
if outputFunc != nil {
message := fmt.Sprintf("All %d nodes are healthy", len(options.Nodes))
if options.Version != "" {
message += fmt.Sprintf(" and running version %s", options.Version)
}
outputFunc(message)
}
outputFunc(message)
}
}
}
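For readers skimming the diff above: the net effect of the provisioner.go changes is that NewProvisioner no longer builds the terraform stack or the cluster client eagerly. Up() constructs the stack on first use, gated by `terraform.enabled`, and CheckNodeHealth() constructs the cluster client only when `cluster.driver` is talos or omni. The sketch below condenses that flow using simplified stand-in types (ConfigHandler, Stack, ClusterClient, NewStack, NewTalosClient are placeholders, not the real windsor packages), so treat it as an illustration of the pattern rather than the actual implementation.

```go
package main

import "fmt"

// Minimal stand-ins for the real config, terraform, and cluster packages
// (hypothetical; the actual interfaces live elsewhere in the repository).
type ConfigHandler interface {
	GetBool(key string, def bool) bool
	GetString(key string, def string) string
}

type Stack interface{ Up() error }

type ClusterClient interface{ Close() }

type Provisioner struct {
	Config         ConfigHandler
	TerraformStack Stack
	ClusterClient  ClusterClient

	// Injection points standing in for the real constructors
	// (terraforminfra.NewStack, cluster.NewTalosClusterClient).
	NewStack       func() Stack
	NewTalosClient func() ClusterClient
}

// Up builds the terraform stack on first use, and only when terraform is enabled.
func (p *Provisioner) Up() error {
	if p.TerraformStack == nil {
		if p.Config == nil || !p.Config.GetBool("terraform.enabled", false) {
			return nil // terraform disabled or no config: nothing to do
		}
		p.TerraformStack = p.NewStack()
	}
	if err := p.TerraformStack.Up(); err != nil {
		return fmt.Errorf("failed to run terraform up: %w", err)
	}
	return nil
}

// ensureClusterClient mirrors the lazy creation at the top of the node health
// check: a client is only built when the configured driver is talos or omni.
func (p *Provisioner) ensureClusterClient() {
	if p.ClusterClient != nil || p.Config == nil {
		return
	}
	switch p.Config.GetString("cluster.driver", "") {
	case "talos", "omni":
		p.ClusterClient = p.NewTalosClient()
	}
}

// staticConfig, noopStack, and noopClient are tiny demo doubles.
type staticConfig map[string]string

func (c staticConfig) GetBool(key string, def bool) bool {
	if v, ok := c[key]; ok {
		return v == "true"
	}
	return def
}

func (c staticConfig) GetString(key, def string) string {
	if v, ok := c[key]; ok {
		return v
	}
	return def
}

type noopStack struct{}

func (noopStack) Up() error { return nil }

type noopClient struct{}

func (noopClient) Close() {}

func main() {
	p := &Provisioner{
		Config:         staticConfig{"terraform.enabled": "true", "cluster.driver": "talos"},
		NewStack:       func() Stack { return noopStack{} },
		NewTalosClient: func() ClusterClient { return noopClient{} },
	}
	fmt.Println("up error:", p.Up()) // the stack is created here, on first use
	p.ensureClusterClient()          // the client is created here, driver is "talos"
}
```

The upside of this arrangement, as reflected in the diff, is that callers who never run terraform or never check node health pay no construction cost, and tests can leave those fields nil or pre-seed them explicitly.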
71 changes: 43 additions & 28 deletions pkg/provisioner/provisioner_test.go
@@ -56,7 +56,7 @@ func createTestBlueprint() *blueprintv1alpha1.Blueprint {
type ProvisionerTestMocks struct {
ConfigHandler config.ConfigHandler
Shell *shell.MockShell
TerraformStack *terraforminfra.MockStack
TerraformStack terraforminfra.Stack
KubernetesManager *kubernetes.MockKubernetesManager
KubernetesClient k8sclient.KubernetesClient
ClusterClient *cluster.MockClusterClient
@@ -99,7 +99,6 @@ func setupProvisionerMocks(t *testing.T, opts ...func(*ProvisionerTestMocks)) *P
return "/test/project", nil
}

terraformStack := terraforminfra.NewMockStack()
kubernetesManager := kubernetes.NewMockKubernetesManager()
kubernetesClient := k8sclient.NewMockKubernetesClient()
clusterClient := cluster.NewMockClusterClient()
@@ -114,6 +113,10 @@ func setupProvisionerMocks(t *testing.T, opts ...func(*ProvisionerTestMocks)) *P
Shell: mockShell,
}

terraformStack := terraforminfra.NewMockStack()
terraformStack.UpFunc = func(blueprint *blueprintv1alpha1.Blueprint) error { return nil }
terraformStack.DownFunc = func(blueprint *blueprintv1alpha1.Blueprint) error { return nil }

mocks := &ProvisionerTestMocks{
ConfigHandler: configHandler,
Shell: mockShell,
@@ -159,8 +162,8 @@ func TestNewProvisioner(t *testing.T) {
t.Error("Expected config handler to be set")
}

if provisioner.TerraformStack == nil {
t.Error("Expected terraform stack to be initialized")
if provisioner.TerraformStack != nil {
t.Error("Expected terraform stack to be nil (lazy loaded)")
}

if provisioner.KubernetesManager == nil {
@@ -185,8 +188,8 @@

provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler)

if provisioner.ClusterClient == nil {
t.Error("Expected cluster client to be created for talos driver")
if provisioner.ClusterClient != nil {
t.Error("Expected cluster client to be nil (lazy loaded)")
}
})

@@ -203,8 +206,8 @@

provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler)

if provisioner.ClusterClient == nil {
t.Error("Expected cluster client to be created for omni driver")
if provisioner.ClusterClient != nil {
t.Error("Expected cluster client to be nil (lazy loaded)")
}
})

@@ -268,7 +271,7 @@ func TestNewProvisioner(t *testing.T) {
provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler)

if provisioner.TerraformStack != nil {
t.Error("Expected terraform stack to be nil when terraform is disabled")
t.Error("Expected terraform stack to be nil (lazy loaded, and disabled in config)")
}
})

Expand All @@ -286,10 +289,6 @@ func TestProvisioner_Up(t *testing.T) {
}
provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler, opts)

mocks.TerraformStack.UpFunc = func(blueprint *blueprintv1alpha1.Blueprint) error {
return nil
}

blueprint := createTestBlueprint()
err := provisioner.Up(blueprint)

@@ -315,8 +314,17 @@

t.Run("SuccessSkipsTerraformWhenDisabled", func(t *testing.T) {
mocks := setupProvisionerMocks(t)
mockConfigHandler := mocks.ConfigHandler.(*config.MockConfigHandler)
mockConfigHandler.GetBoolFunc = func(key string, defaultValue ...bool) bool {
if key == "terraform.enabled" {
return false
}
if len(defaultValue) > 0 {
return defaultValue[0]
}
return false
}
provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler)
provisioner.TerraformStack = nil

blueprint := createTestBlueprint()
err := provisioner.Up(blueprint)
@@ -328,20 +336,21 @@

t.Run("ErrorTerraformStackUp", func(t *testing.T) {
mocks := setupProvisionerMocks(t)
mockStack := terraforminfra.NewMockStack()
mockStack.UpFunc = func(blueprint *blueprintv1alpha1.Blueprint) error {
return fmt.Errorf("terraform stack up failed")
}
opts := &Provisioner{
TerraformStack: mocks.TerraformStack,
TerraformStack: mockStack,
}
provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler, opts)

mocks.TerraformStack.UpFunc = func(blueprint *blueprintv1alpha1.Blueprint) error {
return fmt.Errorf("up failed")
}

blueprint := createTestBlueprint()
err := provisioner.Up(blueprint)

if err == nil {
t.Error("Expected error for terraform stack up failure")
return
}

if !strings.Contains(err.Error(), "failed to run terraform up") {
@@ -359,10 +368,6 @@ func TestProvisioner_Down(t *testing.T) {
}
provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler, opts)

mocks.TerraformStack.DownFunc = func(blueprint *blueprintv1alpha1.Blueprint) error {
return nil
}

blueprint := createTestBlueprint()
err := provisioner.Down(blueprint)

@@ -401,15 +406,15 @@ func TestProvisioner_Down(t *testing.T) {

t.Run("ErrorTerraformStackDown", func(t *testing.T) {
mocks := setupProvisionerMocks(t)
mockStack := terraforminfra.NewMockStack()
mockStack.DownFunc = func(blueprint *blueprintv1alpha1.Blueprint) error {
return fmt.Errorf("terraform stack down failed")
}
opts := &Provisioner{
TerraformStack: mocks.TerraformStack,
TerraformStack: mockStack,
}
provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler, opts)

mocks.TerraformStack.DownFunc = func(blueprint *blueprintv1alpha1.Blueprint) error {
return fmt.Errorf("down failed")
}

blueprint := createTestBlueprint()
err := provisioner.Down(blueprint)

@@ -1343,6 +1348,16 @@ func TestProvisioner_CheckNodeHealth(t *testing.T) {

t.Run("ErrorNoHealthChecksWhenNodesProvidedButNoClusterClient", func(t *testing.T) {
mocks := setupProvisionerMocks(t)
mockConfigHandler := mocks.ConfigHandler.(*config.MockConfigHandler)
mockConfigHandler.GetStringFunc = func(key string, defaultValue ...string) string {
if key == "cluster.driver" {
return "" // No cluster driver set, so ClusterClient won't be created
}
if len(defaultValue) > 0 {
return defaultValue[0]
}
return ""
}
provisioner := NewProvisioner(mocks.Runtime, mocks.BlueprintHandler)
provisioner.ClusterClient = nil

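The test changes lean on the existing options pattern: a partially populated *Provisioner passed to NewProvisioner pre-seeds a dependency (here a MockStack with stubbed UpFunc/DownFunc) and bypasses the lazy construction entirely. The sketch below illustrates that override mechanism with simplified stand-in types rather than the real terraforminfra and runtime packages, so the Stack interface and the Provisioner fields shown here are assumptions made for the illustration.

```go
package main

import "fmt"

type Stack interface{ Up() error }

// MockStack mimics the test-double style used in the diff: behavior is
// injected through exported function fields.
type MockStack struct {
	UpFunc func() error
}

func (m *MockStack) Up() error {
	if m.UpFunc != nil {
		return m.UpFunc()
	}
	return nil
}

type Provisioner struct {
	TerraformStack Stack
}

// NewProvisioner copies any non-nil fields from the optional override struct,
// so callers (and tests) can pre-seed dependencies that would otherwise be
// constructed lazily on first use.
func NewProvisioner(opts ...*Provisioner) *Provisioner {
	p := &Provisioner{}
	for _, o := range opts {
		if o == nil {
			continue
		}
		if o.TerraformStack != nil {
			p.TerraformStack = o.TerraformStack
		}
	}
	return p
}

func main() {
	mock := &MockStack{UpFunc: func() error { return fmt.Errorf("terraform stack up failed") }}
	p := NewProvisioner(&Provisioner{TerraformStack: mock})
	fmt.Println(p.TerraformStack.Up()) // prints the injected error
}
```

Tests that instead want to exercise the lazy path, as in the "SuccessSkipsTerraformWhenDisabled" and "ErrorNoHealthChecksWhenNodesProvidedButNoClusterClient" cases above, leave the field nil and steer behavior through the mock config handler's GetBoolFunc and GetStringFunc.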