diff --git a/api/v1alpha1/blueprint_types.go b/api/v1alpha1/blueprint_types.go
index 3d7cb89da..e3a4af26f 100644
--- a/api/v1alpha1/blueprint_types.go
+++ b/api/v1alpha1/blueprint_types.go
@@ -173,6 +173,10 @@ type Kustomization struct {

 	// PostBuild is a post-build step to run after the kustomization is applied.
 	PostBuild *PostBuild `yaml:"postBuild,omitempty"`
+
+	// Destroy determines if the kustomization should be destroyed during down operations.
+	// Defaults to true if not specified.
+	Destroy *bool `yaml:"destroy,omitempty"`
 }

 // PostBuild is a post-build step to run after the kustomization is applied.
@@ -414,5 +418,6 @@ func (k *Kustomization) DeepCopy() *Kustomization {
 		Components: slices.Clone(k.Components),
 		Cleanup:    slices.Clone(k.Cleanup),
 		PostBuild:  postBuildCopy,
+		Destroy:    k.Destroy,
 	}
 }
diff --git a/cmd/down.go b/cmd/down.go
index d010d3c69..a38825c1c 100644
--- a/cmd/down.go
+++ b/cmd/down.go
@@ -13,6 +13,7 @@ var (
 	cleanFlag         bool
 	skipK8sFlag       bool
 	skipTerraformFlag bool
+	skipDockerFlag    bool
 )

 var downCmd = &cobra.Command{
@@ -63,6 +64,9 @@ var downCmd = &cobra.Command{
 		if skipTerraformFlag {
 			ctx = context.WithValue(ctx, "skipTerraform", true)
 		}
+		if skipDockerFlag {
+			ctx = context.WithValue(ctx, "skipDocker", true)
+		}

 		// Execute the down pipeline
 		if err := downPipeline.Execute(ctx); err != nil {
@@ -77,5 +81,6 @@ func init() {
 	downCmd.Flags().BoolVar(&cleanFlag, "clean", false, "Clean up context specific artifacts")
 	downCmd.Flags().BoolVar(&skipK8sFlag, "skip-k8s", false, "Skip Kubernetes cleanup (blueprint cleanup)")
 	downCmd.Flags().BoolVar(&skipTerraformFlag, "skip-tf", false, "Skip Terraform cleanup")
+	downCmd.Flags().BoolVar(&skipDockerFlag, "skip-docker", false, "Skip Docker container cleanup")
 	rootCmd.AddCommand(downCmd)
 }
diff --git a/pkg/blueprint/blueprint_handler.go b/pkg/blueprint/blueprint_handler.go
index 6bb32c592..9d102c96c 100644
--- a/pkg/blueprint/blueprint_handler.go
+++ b/pkg/blueprint/blueprint_handler.go
@@ -1,11 +1,14 @@
 package blueprint

 import (
+	"context"
 	"fmt"
 	"os"
+	"os/signal"
 	"path/filepath"
 	"slices"
 	"strings"
+	"syscall"
 	"time"

 	_ "embed"
@@ -174,7 +177,10 @@ func (b *BaseBlueprintHandler) Write(overwrite ...bool) error {
 		return fmt.Errorf("error creating directory: %w", err)
 	}

-	cleanedBlueprint := b.createCleanedBlueprint()
+	cleanedBlueprint := b.blueprint.DeepCopy()
+	for i := range cleanedBlueprint.TerraformComponents {
+		cleanedBlueprint.TerraformComponents[i].Values = map[string]any{}
+	}

 	data, err := b.shims.YamlMarshal(cleanedBlueprint)
 	if err != nil {
@@ -263,7 +269,7 @@ func (b *BaseBlueprintHandler) Install() error {
 	spin.Start()
 	defer spin.Stop()

-	if err := b.createManagedNamespace(constants.DEFAULT_FLUX_SYSTEM_NAMESPACE); err != nil {
+	if err := b.kubernetesManager.CreateNamespace(constants.DEFAULT_FLUX_SYSTEM_NAMESPACE); err != nil {
 		spin.Stop()
 		fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
 		return fmt.Errorf("failed to create namespace: %w", err)
@@ -385,27 +391,57 @@ func (b *BaseBlueprintHandler) GetLocalTemplateData() (map[string][]byte, error)
 	return templateData, nil
 }

-// Down orchestrates the teardown of all kustomizations and associated resources, skipping "not found" errors.
-// Sequence:
-// 1. Suspend all kustomizations and associated helmreleases to prevent reconciliation (ignoring not found errors)
-// 2. Apply cleanup kustomizations if defined for resource cleanup tasks
-// 3. Wait for cleanup kustomizations to complete
-// 4. Delete main kustomizations in reverse dependency order, skipping not found errors
-// 5. Delete cleanup kustomizations and their namespace, skipping not found errors
-//
-// Dependency resolution is handled via topological sorting to ensure correct deletion order.
-// A dedicated cleanup namespace is managed for cleanup kustomizations when required.
+// Down tears down blueprint kustomizations and associated resources, ignoring "not found" errors.
+// It filters out kustomizations whose Destroy flag is false, resolves dependencies via topological
+// sorting, and deletes the remainder in reverse dependency order. Kustomizations that define
+// cleanup paths have a cleanup kustomization applied and awaited before deletion.
+// The operation can be cancelled at any point with SIGINT or SIGTERM.
 func (b *BaseBlueprintHandler) Down() error {
-	kustomizations := b.getKustomizations()
+	allKustomizations := b.getKustomizations()
+	if len(allKustomizations) == 0 {
+		return nil
+	}
+
+	var kustomizations []blueprintv1alpha1.Kustomization
+	for _, k := range allKustomizations {
+		if k.Destroy == nil || *k.Destroy {
+			kustomizations = append(kustomizations, k)
+		}
+	}
+
 	if len(kustomizations) == 0 {
 		return nil
 	}

-	spin := spinner.New(spinner.CharSets[14], 100*time.Millisecond, spinner.WithColor("green"))
-	spin.Suffix = " 🗑️ Tearing down blueprint resources"
-	spin.Start()
-	defer spin.Stop()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	sigChan := make(chan os.Signal, 1)
+	signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM)
+	defer signal.Stop(sigChan)
+	go func() {
+		<-sigChan
+		fmt.Fprintf(os.Stderr, "\nReceived interrupt signal, cancelling operations...\n")
+		cancel()
+	}()

+	if err := b.destroyKustomizations(ctx, kustomizations); err != nil {
+		if ctx.Err() == context.Canceled {
+			return fmt.Errorf("operation cancelled by user: %w", err)
+		}
+		return err
+	}
+
+	return nil
+}
+
+// =============================================================================
+// Private Methods
+// =============================================================================
+
+// destroyKustomizations deletes the given kustomizations and performs any associated cleanup.
+// It sorts the kustomizations by their dependencies; for each one it applies the cleanup
+// kustomization when defined, waits for it to become ready, deletes it, and then deletes
+// the main kustomization itself.
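+//
+// For example, if kustomization "apps" lists "infra" in DependsOn, "apps" is deleted
+// before "infra", so dependents are always removed ahead of the kustomizations they
+// depend on (the names here are illustrative).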
+func (b *BaseBlueprintHandler) destroyKustomizations(ctx context.Context, kustomizations []blueprintv1alpha1.Kustomization) error {
 	deps := make(map[string][]string)
 	for _, k := range kustomizations {
 		deps[k.Name] = k.DependsOn
@@ -436,55 +472,29 @@ func (b *BaseBlueprintHandler) Down() error {
 		nameToK[k.Name] = k
 	}

-	needsCleanupNamespace := false
-	for _, k := range kustomizations {
-		if len(k.Cleanup) > 0 {
-			needsCleanupNamespace = true
-			break
-		}
-	}
-
-	if needsCleanupNamespace {
-		if err := b.createManagedNamespace("system-cleanup"); err != nil {
-			spin.Stop()
-			fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-			return fmt.Errorf("failed to create system-cleanup namespace: %w", err)
+	for _, name := range sorted {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		default:
 		}
-	}

-	for _, name := range sorted {
 		k := nameToK[name]
-		if err := b.kubernetesManager.SuspendKustomization(k.Name, constants.DEFAULT_FLUX_SYSTEM_NAMESPACE); err != nil {
-			if !isNotFoundError(err) {
-				spin.Stop()
-				fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-				return fmt.Errorf("failed to suspend kustomization %s: %w", k.Name, err)
+		if len(k.Cleanup) > 0 {
+			status, err := b.kubernetesManager.GetKustomizationStatus([]string{k.Name})
+			if err != nil {
+				return fmt.Errorf("failed to check status of kustomization %s: %w", k.Name, err)
 			}
-		}

-		helmReleases, err := b.kubernetesManager.GetHelmReleasesForKustomization(k.Name, constants.DEFAULT_FLUX_SYSTEM_NAMESPACE)
-		if err != nil {
-			spin.Stop()
-			fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-			return fmt.Errorf("failed to get helmreleases for kustomization %s: %w", k.Name, err)
-		}
-
-		for _, hr := range helmReleases {
-			if err := b.kubernetesManager.SuspendHelmRelease(hr.Name, hr.Namespace); err != nil {
-				if !isNotFoundError(err) {
-					spin.Stop()
-					fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-					return fmt.Errorf("failed to suspend helmrelease %s in namespace %s: %w", hr.Name, hr.Namespace, err)
-				}
+			if !status[k.Name] {
+				continue
 			}
-		}
-	}

-	var cleanupNames []string
-	for _, name := range sorted {
-		k := nameToK[name]
-		if len(k.Cleanup) > 0 {
+			cleanupSpin := spinner.New(spinner.CharSets[14], 100*time.Millisecond, spinner.WithColor("green"))
+			cleanupSpin.Suffix = fmt.Sprintf(" 🧹 Applying cleanup kustomization for %s", k.Name)
+			cleanupSpin.Start()
+
 			cleanupKustomization := &blueprintv1alpha1.Kustomization{
 				Name: k.Name + "-cleanup",
 				Path: filepath.Join(k.Path, "cleanup"),
@@ -499,73 +509,73 @@ func (b *BaseBlueprintHandler) Down() error {
 					SubstituteFrom: []blueprintv1alpha1.SubstituteReference{},
 				},
 			}
+
 			if err := b.kubernetesManager.ApplyKustomization(b.toKubernetesKustomization(*cleanupKustomization, constants.DEFAULT_FLUX_SYSTEM_NAMESPACE)); err != nil {
-				spin.Stop()
-				fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
+				cleanupSpin.Stop()
 				return fmt.Errorf("failed to apply cleanup kustomization for %s: %w", k.Name, err)
 			}
-			cleanupNames = append(cleanupNames, cleanupKustomization.Name)
-		}
-	}

-	for _, name := range sorted {
-		k := nameToK[name]
-		if err := b.kubernetesManager.DeleteKustomization(k.Name, constants.DEFAULT_FLUX_SYSTEM_NAMESPACE); err != nil {
-			spin.Stop()
-			fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-			return fmt.Errorf("failed to delete kustomization %s: %w", k.Name, err)
-		}
-	}
+			timeout := b.shims.TimeAfter(30 * time.Second)
+			ticker := b.shims.NewTicker(2 * time.Second)
+
+			cleanupReady := false
+			attempts := 0
+			maxAttempts := 15
+
+			for !cleanupReady && attempts < maxAttempts {
+				select {
+				case <-ctx.Done():
+					b.shims.TickerStop(ticker)
+					cleanupSpin.Stop()
+					return ctx.Err()
+				case <-timeout:
+					attempts = maxAttempts
+				case <-ticker.C:
+					attempts++
+					ready, err := b.kubernetesManager.GetKustomizationStatus([]string{cleanupKustomization.Name})
+					if err != nil {
+						continue
+					}
+					if ready[cleanupKustomization.Name] {
+						cleanupReady = true
+					}
+				}
+			}
+			b.shims.TickerStop(ticker)

-	spin.Stop()
-	fmt.Fprintf(os.Stderr, "\033[32m✔\033[0m%s - \033[32mDone\033[0m\n", spin.Suffix)
+			cleanupSpin.Stop()

-	if err := b.kubernetesManager.WaitForKustomizationsDeleted("⌛️ Waiting for kustomizations to be deleted", sorted...); err != nil {
-		spin.Stop()
-		fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-		return fmt.Errorf("failed waiting for kustomizations to be deleted: %w", err)
-	}
+			if !cleanupReady {
+				fmt.Fprintf(os.Stderr, "Warning: Cleanup kustomization %s did not become ready within 30 seconds, proceeding anyway\n", cleanupKustomization.Name)
+			}
+			fmt.Fprintf(os.Stderr, "\033[32m✔\033[0m 🧹 Applying cleanup kustomization for %s - \033[32mDone\033[0m\n", k.Name)

-	if len(cleanupNames) > 0 {
-		for _, cname := range cleanupNames {
-			if err := b.kubernetesManager.DeleteKustomization(cname, constants.DEFAULT_FLUX_SYSTEM_NAMESPACE); err != nil {
-				spin.Stop()
-				fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-				return fmt.Errorf("failed to delete cleanup kustomization %s: %w", cname, err)
+			cleanupDeleteSpin := spinner.New(spinner.CharSets[14], 100*time.Millisecond, spinner.WithColor("green"))
+			cleanupDeleteSpin.Suffix = fmt.Sprintf(" 🗑️ Deleting cleanup kustomization %s", cleanupKustomization.Name)
+			cleanupDeleteSpin.Start()
+			if err := b.kubernetesManager.DeleteKustomization(cleanupKustomization.Name, constants.DEFAULT_FLUX_SYSTEM_NAMESPACE); err != nil {
+				cleanupDeleteSpin.Stop()
+				return fmt.Errorf("failed to delete cleanup kustomization %s: %w", cleanupKustomization.Name, err)
 			}
-		}

-		if err := b.kubernetesManager.WaitForKustomizationsDeleted("⌛️ Waiting for cleanup kustomizations to be deleted", cleanupNames...); err != nil {
-			spin.Stop()
-			fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-			return fmt.Errorf("failed waiting for cleanup kustomizations to be deleted: %w", err)
+			cleanupDeleteSpin.Stop()
+			fmt.Fprintf(os.Stderr, "\033[32m✔\033[0m 🗑️ Deleting cleanup kustomization %s - \033[32mDone\033[0m\n", cleanupKustomization.Name)
 		}

-		if err := b.deleteNamespace("system-cleanup"); err != nil {
-			spin.Stop()
-			fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-			return fmt.Errorf("failed to delete system-cleanup namespace: %w", err)
+		deleteSpin := spinner.New(spinner.CharSets[14], 100*time.Millisecond, spinner.WithColor("green"))
+		deleteSpin.Suffix = fmt.Sprintf(" 🗑️ Deleting kustomization %s", k.Name)
+		deleteSpin.Start()
+		if err := b.kubernetesManager.DeleteKustomization(k.Name, constants.DEFAULT_FLUX_SYSTEM_NAMESPACE); err != nil {
+			deleteSpin.Stop()
+			return fmt.Errorf("failed to delete kustomization %s: %w", k.Name, err)
 		}
+
+		deleteSpin.Stop()
+		fmt.Fprintf(os.Stderr, "\033[32m✔\033[0m 🗑️ Deleting kustomization %s - \033[32mDone\033[0m\n", k.Name)
 	}

 	return nil
 }

-// =============================================================================
-// Private Methods
-// =============================================================================
-
-// createCleanedBlueprint returns a deep copy of the blueprint with all 
Terraform component Values fields removed. -// All Values maps are cleared, as these should not be persisted in the final blueprint.yaml. -func (b *BaseBlueprintHandler) createCleanedBlueprint() *blueprintv1alpha1.Blueprint { - cleaned := b.blueprint.DeepCopy() - for i := range cleaned.TerraformComponents { - cleaned.TerraformComponents[i].Values = map[string]any{} - } - return cleaned -} - -// walkAndCollectTemplates recursively walks through template directories and collects .jsonnet files. +// walkAndCollectTemplates traverses template directories to gather .jsonnet files. +// It updates the provided templateData map with the relative paths and content of +// the .jsonnet files found. The function handles directory recursion and file reading +// errors, returning an error if any operation fails. func (b *BaseBlueprintHandler) walkAndCollectTemplates(templateDir, templateRoot string, templateData map[string][]byte) error { entries, err := b.shims.ReadDir(templateDir) if err != nil { @@ -814,6 +824,10 @@ func (b *BaseBlueprintHandler) getKustomizations() []blueprintv1alpha1.Kustomiza defaultForce := constants.DEFAULT_FLUX_KUSTOMIZATION_FORCE kustomizations[i].Force = &defaultForce } + if kustomizations[i].Destroy == nil { + defaultDestroy := true + kustomizations[i].Destroy = &defaultDestroy + } kustomizations[i].PostBuild = &blueprintv1alpha1.PostBuild{ SubstituteFrom: []blueprintv1alpha1.SubstituteReference{ @@ -1076,14 +1090,6 @@ func (b *BaseBlueprintHandler) calculateMaxWaitTime() time.Duration { return maxPathTime } -func (b *BaseBlueprintHandler) createManagedNamespace(name string) error { - return b.kubernetesManager.CreateNamespace(name) -} - -func (b *BaseBlueprintHandler) deleteNamespace(name string) error { - return b.kubernetesManager.DeleteNamespace(name) -} - // toKubernetesKustomization converts a blueprint kustomization to a Flux kustomization // It handles conversion of dependsOn, patches, and postBuild configurations // It maps blueprint fields to their Flux kustomization equivalents @@ -1134,6 +1140,11 @@ func (b *BaseBlueprintHandler) toKubernetesKustomization(k blueprintv1alpha1.Kus prune = *k.Prune } + deletionPolicy := "MirrorPrune" + if k.Destroy == nil || *k.Destroy { + deletionPolicy = "WaitForTermination" + } + sourceKind := "GitRepository" if b.isOCISource(k.Source) { sourceKind = "OCIRepository" @@ -1153,56 +1164,36 @@ func (b *BaseBlueprintHandler) toKubernetesKustomization(k blueprintv1alpha1.Kus Kind: sourceKind, Name: k.Source, }, - Path: k.Path, - DependsOn: dependsOn, - Interval: interval, - RetryInterval: &retryInterval, - Timeout: &timeout, - Patches: patches, - Force: *k.Force, - PostBuild: postBuild, - Components: k.Components, - Wait: *k.Wait, - Prune: prune, + Path: k.Path, + DependsOn: dependsOn, + Interval: interval, + RetryInterval: &retryInterval, + Timeout: &timeout, + Patches: patches, + Force: *k.Force, + PostBuild: postBuild, + Components: k.Components, + Wait: *k.Wait, + Prune: prune, + DeletionPolicy: deletionPolicy, }, } } -// isOCISource determines whether a given source name or resolved URL corresponds to an OCI repository -// source by examining the URL prefix of the blueprint's main repository and any additional sources, -// or by checking if the input is already a resolved OCI URL. +// isOCISource returns true if the provided sourceNameOrURL is an OCI repository reference. +// It checks if the input is a resolved OCI URL, matches the blueprint's main repository with an OCI URL, +// or matches any additional source with an OCI URL. 
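+//
+// For example, "oci://ghcr.io/org/blueprints" is recognized directly as an OCI source,
+// while a source entry named "core" whose Url is "oci://ghcr.io/org/core" is recognized
+// by name (both values here are illustrative).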
func (b *BaseBlueprintHandler) isOCISource(sourceNameOrURL string) bool { - // Check if it's already a resolved OCI URL if strings.HasPrefix(sourceNameOrURL, "oci://") { return true } - - // Check if it's a source name that maps to an OCI URL if sourceNameOrURL == b.blueprint.Metadata.Name && strings.HasPrefix(b.blueprint.Repository.Url, "oci://") { return true } - for _, source := range b.blueprint.Sources { if source.Name == sourceNameOrURL && strings.HasPrefix(source.Url, "oci://") { return true } } - return false } - -// isNotFoundError checks if an error is a Kubernetes resource not found error -// This is used during cleanup to ignore errors when resources don't exist -func isNotFoundError(err error) bool { - if err == nil { - return false - } - - errMsg := strings.ToLower(err.Error()) - // Check for resource not found errors, but not namespace not found errors - return (strings.Contains(errMsg, "resource not found") || - strings.Contains(errMsg, "could not find the requested resource") || - strings.Contains(errMsg, "the server could not find the requested resource") || - strings.Contains(errMsg, "\" not found")) && - !strings.Contains(errMsg, "namespace not found") -} diff --git a/pkg/blueprint/blueprint_handler_test.go b/pkg/blueprint/blueprint_handler_test.go index 68d0c07c8..713648ec2 100644 --- a/pkg/blueprint/blueprint_handler_test.go +++ b/pkg/blueprint/blueprint_handler_test.go @@ -226,6 +226,18 @@ func setupShims(t *testing.T) *Shims { }) } + // Override timing shims for fast tests + shims.TimeAfter = func(d time.Duration) <-chan time.Time { + // Return a channel that never fires (no timeout for tests) + return make(chan time.Time) + } + + shims.NewTicker = func(d time.Duration) *time.Ticker { + // Return a ticker that ticks immediately for tests + ticker := time.NewTicker(1 * time.Millisecond) + return ticker + } + return shims } @@ -300,9 +312,7 @@ func setupMocks(t *testing.T, opts ...*SetupOptions) *Mocks { mockKubernetesManager.DeleteKustomizationFunc = func(name, namespace string) error { return nil } - mockKubernetesManager.WaitForKustomizationsDeletedFunc = func(message string, names ...string) error { - return nil - } + mockKubernetesManager.ApplyKustomizationFunc = func(kustomization kustomizev1.Kustomization) error { return nil } @@ -312,6 +322,7 @@ func setupMocks(t *testing.T, opts ...*SetupOptions) *Mocks { mockKubernetesManager.GetKustomizationStatusFunc = func(names []string) (map[string]bool, error) { status := make(map[string]bool) for _, name := range names { + // Return true for all kustomizations, including cleanup ones status[name] = true } return status, nil @@ -1834,42 +1845,6 @@ func TestBlueprintHandler_Down(t *testing.T) { }, expectedError: "delete error", }, - { - name: "SuspendKustomizationError", - kustomizations: []blueprintv1alpha1.Kustomization{ - {Name: "k1"}, - }, - setupMock: func(m *kubernetes.MockKubernetesManager) { - m.SuspendKustomizationFunc = func(name, namespace string) error { - return fmt.Errorf("suspend error") - } - }, - expectedError: "suspend error", - }, - { - name: "ErrorWaitingForKustomizationsDeleted", - kustomizations: []blueprintv1alpha1.Kustomization{ - {Name: "k1"}, - }, - setupMock: func(m *kubernetes.MockKubernetesManager) { - m.WaitForKustomizationsDeletedFunc = func(message string, names ...string) error { - return fmt.Errorf("wait for deletion error") - } - }, - expectedError: "failed waiting for kustomizations to be deleted", - }, - { - name: "ErrorWaitingForCleanupKustomizationsDeleted", - kustomizations: 
[]blueprintv1alpha1.Kustomization{ - {Name: "k1", Cleanup: []string{"cleanup"}}, - }, - setupMock: func(m *kubernetes.MockKubernetesManager) { - m.WaitForKustomizationsDeletedFunc = func(message string, names ...string) error { - return fmt.Errorf("wait for cleanup deletion error") - } - }, - expectedError: "failed waiting for kustomizations to be deleted", - }, } for _, tc := range testCases { @@ -1911,94 +1886,6 @@ func TestBlueprintHandler_Down(t *testing.T) { } }) - t.Run("ErrorCreatingSystemCleanupNamespace", func(t *testing.T) { - // Given a handler with kustomizations that have cleanup paths - handler, mocks := setup(t) - baseHandler := handler - baseHandler.blueprint.Kustomizations = []blueprintv1alpha1.Kustomization{ - {Name: "k1", Cleanup: []string{"cleanup/path"}}, - } - - // And a mock that fails to create system-cleanup namespace - mocks.KubernetesManager.CreateNamespaceFunc = func(name string) error { - if name == "system-cleanup" { - return fmt.Errorf("create namespace error") - } - return nil - } - - // When calling Down - err := baseHandler.Down() - - // Then an error should be returned - if err == nil { - t.Error("Expected error, got nil") - } - if !strings.Contains(err.Error(), "failed to create system-cleanup namespace") { - t.Errorf("Expected system-cleanup namespace creation error, got: %v", err) - } - }) - - t.Run("ErrorGettingHelmReleases", func(t *testing.T) { - // Given a handler with kustomizations - handler, mocks := setup(t) - baseHandler := handler - baseHandler.blueprint.Kustomizations = []blueprintv1alpha1.Kustomization{ - {Name: "k1"}, - } - - // And a mock that fails to get HelmReleases - mocks.KubernetesManager.GetHelmReleasesForKustomizationFunc = func(name, namespace string) ([]helmv2.HelmRelease, error) { - return nil, fmt.Errorf("get helmreleases error") - } - - // When calling Down - err := baseHandler.Down() - - // Then an error should be returned - if err == nil { - t.Error("Expected error, got nil") - } - if !strings.Contains(err.Error(), "failed to get helmreleases for kustomization") { - t.Errorf("Expected helmreleases error, got: %v", err) - } - }) - - t.Run("ErrorSuspendingHelmRelease", func(t *testing.T) { - // Given a handler with kustomizations - handler, mocks := setup(t) - baseHandler := handler - baseHandler.blueprint.Kustomizations = []blueprintv1alpha1.Kustomization{ - {Name: "k1"}, - } - - // And a mock that returns HelmReleases but fails to suspend them - mocks.KubernetesManager.GetHelmReleasesForKustomizationFunc = func(name, namespace string) ([]helmv2.HelmRelease, error) { - return []helmv2.HelmRelease{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "test-helm-release", - Namespace: "test-namespace", - }, - }, - }, nil - } - mocks.KubernetesManager.SuspendHelmReleaseFunc = func(name, namespace string) error { - return fmt.Errorf("suspend helmrelease error") - } - - // When calling Down - err := baseHandler.Down() - - // Then an error should be returned - if err == nil { - t.Error("Expected error, got nil") - } - if !strings.Contains(err.Error(), "failed to suspend helmrelease") { - t.Errorf("Expected helmrelease suspend error, got: %v", err) - } - }) - t.Run("ErrorDeletingCleanupKustomizations", func(t *testing.T) { // Given a handler with kustomizations that have cleanup paths handler, mocks := setup(t) @@ -2027,153 +1914,6 @@ func TestBlueprintHandler_Down(t *testing.T) { } }) - t.Run("ErrorDeletingSystemCleanupNamespace", func(t *testing.T) { - // Given a handler with kustomizations that have cleanup paths - handler, mocks := 
setup(t) - baseHandler := handler - baseHandler.blueprint.Kustomizations = []blueprintv1alpha1.Kustomization{ - {Name: "k1", Cleanup: []string{"cleanup/path"}}, - } - - // And a mock that fails to delete system-cleanup namespace - mocks.KubernetesManager.DeleteNamespaceFunc = func(name string) error { - if name == "system-cleanup" { - return fmt.Errorf("delete namespace error") - } - return nil - } - - // When calling Down - err := baseHandler.Down() - - // Then an error should be returned - if err == nil { - t.Error("Expected error, got nil") - } - if !strings.Contains(err.Error(), "failed to delete system-cleanup namespace") { - t.Errorf("Expected system-cleanup namespace deletion error, got: %v", err) - } - }) -} - -func TestBaseBlueprintHandler_CreateManagedNamespace(t *testing.T) { - setup := func(t *testing.T) (*BaseBlueprintHandler, *Mocks) { - t.Helper() - mocks := setupMocks(t) - handler := NewBlueprintHandler(mocks.Injector) - handler.shims = mocks.Shims - err := handler.Initialize() - if err != nil { - t.Fatalf("Failed to initialize handler: %v", err) - } - return handler, mocks - } - - t.Run("Success", func(t *testing.T) { - // Given a blueprint handler - handler, mocks := setup(t) - - // And a mock Kubernetes manager that tracks calls - var createdNamespace string - mocks.KubernetesManager.CreateNamespaceFunc = func(name string) error { - createdNamespace = name - return nil - } - - // When creating a managed namespace - err := handler.createManagedNamespace("test-namespace") - - // Then no error should be returned - if err != nil { - t.Errorf("Expected no error, got: %v", err) - } - - // And the correct namespace should be created - if createdNamespace != "test-namespace" { - t.Errorf("Expected namespace 'test-namespace', got: %s", createdNamespace) - } - }) - - t.Run("Error", func(t *testing.T) { - // Given a blueprint handler - handler, mocks := setup(t) - - // And a mock Kubernetes manager that returns an error - mocks.KubernetesManager.CreateNamespaceFunc = func(name string) error { - return fmt.Errorf("mock create error") - } - - // When creating a managed namespace - err := handler.createManagedNamespace("test-namespace") - - // Then an error should be returned - if err == nil { - t.Error("Expected error, got nil") - } - if !strings.Contains(err.Error(), "mock create error") { - t.Errorf("Expected error about create error, got: %v", err) - } - }) -} - -func TestBaseBlueprintHandler_DeleteNamespace(t *testing.T) { - setup := func(t *testing.T) (*BaseBlueprintHandler, *Mocks) { - t.Helper() - mocks := setupMocks(t) - handler := NewBlueprintHandler(mocks.Injector) - handler.shims = mocks.Shims - err := handler.Initialize() - if err != nil { - t.Fatalf("Failed to initialize handler: %v", err) - } - return handler, mocks - } - - t.Run("Success", func(t *testing.T) { - // Given a blueprint handler - handler, mocks := setup(t) - - // And a mock Kubernetes manager that tracks calls - var deletedNamespace string - mocks.KubernetesManager.DeleteNamespaceFunc = func(name string) error { - deletedNamespace = name - return nil - } - - // When deleting a namespace - err := handler.deleteNamespace("test-namespace") - - // Then no error should be returned - if err != nil { - t.Errorf("Expected no error, got: %v", err) - } - - // And the correct namespace should be deleted - if deletedNamespace != "test-namespace" { - t.Errorf("Expected namespace 'test-namespace', got: %s", deletedNamespace) - } - }) - - t.Run("Error", func(t *testing.T) { - // Given a blueprint handler - handler, mocks 
:= setup(t) - - // And a mock Kubernetes manager that returns an error - mocks.KubernetesManager.DeleteNamespaceFunc = func(name string) error { - return fmt.Errorf("mock delete error") - } - - // When deleting a namespace - err := handler.deleteNamespace("test-namespace") - - // Then an error should be returned - if err == nil { - t.Error("Expected error, got nil") - } - if !strings.Contains(err.Error(), "mock delete error") { - t.Errorf("Expected error about delete error, got: %v", err) - } - }) } func TestBlueprintHandler_GetRepository(t *testing.T) { diff --git a/pkg/kubernetes/kubernetes_client.go b/pkg/kubernetes/kubernetes_client.go index 135869567..c112419af 100644 --- a/pkg/kubernetes/kubernetes_client.go +++ b/pkg/kubernetes/kubernetes_client.go @@ -137,16 +137,11 @@ func (c *DynamicKubernetesClient) GetNodeReadyStatus(ctx context.Context, nodeNa Resource: "nodes", } - var nodes *unstructured.UnstructuredList - var err error - - // Get all nodes and filter by name if specific nodes are requested - nodes, err = c.client.Resource(nodeGVR).List(ctx, metav1.ListOptions{}) + nodes, err := c.client.Resource(nodeGVR).List(ctx, metav1.ListOptions{}) if err != nil { return nil, fmt.Errorf("failed to list nodes: %w", err) } - // Filter nodes if specific node names are requested if len(nodeNames) > 0 { var filteredNodes []unstructured.Unstructured nodeNameSet := make(map[string]bool) @@ -160,14 +155,9 @@ func (c *DynamicKubernetesClient) GetNodeReadyStatus(ctx context.Context, nodeNa } } - // Replace the items with filtered ones nodes.Items = filteredNodes } - if err != nil { - return nil, fmt.Errorf("failed to list nodes: %w", err) - } - readyStatus := make(map[string]bool) for _, node := range nodes.Items { nodeName := node.GetName() diff --git a/pkg/kubernetes/kubernetes_manager.go b/pkg/kubernetes/kubernetes_manager.go index f960c9997..ae6be8b61 100644 --- a/pkg/kubernetes/kubernetes_manager.go +++ b/pkg/kubernetes/kubernetes_manager.go @@ -43,7 +43,6 @@ type KubernetesManager interface { SuspendHelmRelease(name, namespace string) error ApplyGitRepository(repo *sourcev1.GitRepository) error ApplyOCIRepository(repo *sourcev1.OCIRepository) error - WaitForKustomizationsDeleted(message string, names ...string) error CheckGitRepositoryStatus() error GetKustomizationStatus(names []string) (map[string]bool, error) WaitForKubernetesHealthy(ctx context.Context, endpoint string, outputFunc func(string), nodeNames ...string) error @@ -117,7 +116,9 @@ func (k *BaseKubernetesManager) ApplyKustomization(kustomization kustomizev1.Kus return k.applyWithRetry(gvr, obj, opts) } -// DeleteKustomization removes a Kustomization resource +// DeleteKustomization removes a Kustomization resource using background deletion. +// Background deletion allows the kustomization to enter "Terminating" state while its +// children are deleted in the background. The method waits for the deletion to complete. 
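+//
+// The delete call is roughly equivalent to:
+//
+//	kubectl delete kustomization <name> -n <namespace> --cascade=background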
 func (k *BaseKubernetesManager) DeleteKustomization(name, namespace string) error {
 	gvr := schema.GroupVersionResource{
 		Group:    "kustomize.toolkit.fluxcd.io",
@@ -125,11 +126,32 @@ func (k *BaseKubernetesManager) DeleteKustomization(name, namespace string) erro
 		Version:  "v1",
 		Resource: "kustomizations",
 	}

-	err := k.client.DeleteResource(gvr, namespace, name, metav1.DeleteOptions{})
+	propagationPolicy := metav1.DeletePropagationBackground
+	deleteOptions := metav1.DeleteOptions{
+		PropagationPolicy: &propagationPolicy,
+	}
+
+	err := k.client.DeleteResource(gvr, namespace, name, deleteOptions)
 	if err != nil && isNotFoundError(err) {
 		return nil
 	}
-	return err
+	if err != nil {
+		return err
+	}
+
+	timeout := time.Now().Add(k.kustomizationReconcileTimeout)
+	for time.Now().Before(timeout) {
+		_, err := k.client.GetResource(gvr, namespace, name)
+		if err != nil && isNotFoundError(err) {
+			return nil
+		}
+		if err != nil {
+			return fmt.Errorf("error checking kustomization deletion status: %w", err)
+		}
+		time.Sleep(k.kustomizationWaitPollInterval)
+	}
+
+	return fmt.Errorf("timeout waiting for kustomization %s to be deleted", name)
 }

 // WaitForKustomizations waits for kustomizations to be ready
@@ -231,7 +253,10 @@ func (k *BaseKubernetesManager) CreateNamespace(name string) error {
 	return k.applyWithRetry(gvr, obj, opts)
 }

-// DeleteNamespace removes a namespace
+// DeleteNamespace deletes the specified namespace and waits for the deletion to complete
+// before returning. Returns nil if the namespace is deleted successfully, or an error if
+// deletion fails or times out.
 func (k *BaseKubernetesManager) DeleteNamespace(name string) error {
 	gvr := schema.GroupVersionResource{
 		Group:    "",
@@ -408,48 +433,8 @@ func (k *BaseKubernetesManager) ApplyOCIRepository(repo *sourcev1.OCIRepository)
 	return k.applyWithRetry(gvr, obj, opts)
 }

-// WaitForKustomizationsDeleted waits for the specified kustomizations to be deleted.
-func (k *BaseKubernetesManager) WaitForKustomizationsDeleted(message string, names ...string) error {
-	spin := spinner.New(spinner.CharSets[14], 100*time.Millisecond, spinner.WithColor("green"))
-	spin.Suffix = " " + message
-	spin.Start()
-	defer spin.Stop()
-
-	timeout := time.After(k.kustomizationReconcileTimeout)
-	ticker := time.NewTicker(k.kustomizationWaitPollInterval)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-timeout:
-			spin.Stop()
-			fmt.Fprintf(os.Stderr, "✗%s - \033[31mFailed\033[0m\n", spin.Suffix)
-			return fmt.Errorf("timeout waiting for kustomizations to be deleted")
-		case <-ticker.C:
-			allDeleted := true
-			for _, name := range names {
-				_, err := k.client.GetResource(schema.GroupVersionResource{
-					Group:    "kustomize.toolkit.fluxcd.io",
-					Version:  "v1",
-					Resource: "kustomizations",
-				}, constants.DEFAULT_FLUX_SYSTEM_NAMESPACE, name)
-				if err == nil {
-					allDeleted = false
-					break
-				}
-			}
-			if allDeleted {
-				spin.Stop()
-				fmt.Fprintf(os.Stderr, "\033[32m✔\033[0m%s - \033[32mDone\033[0m\n", spin.Suffix)
-				return nil
-			}
-		}
-	}
-}
-
 // CheckGitRepositoryStatus checks the status of all GitRepository and OCIRepository resources
 func (k *BaseKubernetesManager) CheckGitRepositoryStatus() error {
-	// Check GitRepositories
 	gitGvr := schema.GroupVersionResource{
 		Group:    "source.toolkit.fluxcd.io",
 		Version:  "v1",
@@ -474,7 +459,6 @@ func (k *BaseKubernetesManager) CheckGitRepositoryStatus() error {
 		}
 	}

-	// Check OCIRepositories
 	ociGvr := schema.GroupVersionResource{
 		Group:    "source.toolkit.fluxcd.io",
 		Version:  "v1",
@@ -502,7 +486,10 @@ func (k *BaseKubernetesManager) CheckGitRepositoryStatus() error {
 	return nil
 }

-// GetKustomizationStatus checks the status of kustomizations
+// GetKustomizationStatus returns a map indicating readiness for each specified kustomization in the default
+// Flux system namespace. If a kustomization is not found, its status is set to false. If any kustomization
+// has a Ready condition with Status False and Reason "ReconciliationFailed", an error is returned with the
+// failure message.
 func (k *BaseKubernetesManager) GetKustomizationStatus(names []string) (map[string]bool, error) {
 	gvr := schema.GroupVersionResource{
 		Group:    "kustomize.toolkit.fluxcd.io",
@@ -549,9 +536,9 @@ func (k *BaseKubernetesManager) GetKustomizationStatus(names []string) (map[stri
 	return status, nil
 }

-// WaitForKubernetesHealthy waits for the Kubernetes API to be healthy and optionally checks node Ready state.
-// If nodeNames are provided, it will also verify that all specified nodes are in Ready state.
-// Returns an error if the API is unreachable or if any specified nodes are not Ready.
+// WaitForKubernetesHealthy waits for the Kubernetes API to become healthy within the context deadline.
+// If nodeNames are provided, verifies all specified nodes reach Ready state before returning.
+// Returns an error if the API is unreachable or any specified nodes are not Ready within the deadline.
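+//
+// A typical call, assuming a five-minute budget (endpoint and node names are illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+//	defer cancel()
+//	err := manager.WaitForKubernetesHealthy(ctx, "https://127.0.0.1:6443", nil, "controlplane-1")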
func (k *BaseKubernetesManager) WaitForKubernetesHealthy(ctx context.Context, endpoint string, outputFunc func(string), nodeNames ...string) error { if k.client == nil { return fmt.Errorf("kubernetes client not initialized") @@ -569,13 +556,11 @@ func (k *BaseKubernetesManager) WaitForKubernetesHealthy(ctx context.Context, en case <-ctx.Done(): return fmt.Errorf("timeout waiting for Kubernetes API to be healthy") default: - // Check API connectivity if err := k.client.CheckHealth(ctx, endpoint); err != nil { time.Sleep(pollInterval) continue } - // If node names are specified, check their Ready state if len(nodeNames) > 0 { if err := k.waitForNodesReady(ctx, nodeNames, outputFunc); err != nil { time.Sleep(pollInterval) @@ -590,8 +575,9 @@ func (k *BaseKubernetesManager) WaitForKubernetesHealthy(ctx context.Context, en return fmt.Errorf("timeout waiting for Kubernetes API to be healthy") } -// waitForNodesReady waits until all specified nodes exist and are in Ready state. -// Returns an error if any nodes are missing or not Ready within the context deadline. +// waitForNodesReady blocks until all specified nodes exist and are in Ready state or the context deadline is reached. +// It periodically queries node status, invokes outputFunc on status changes, and returns an error if any nodes are missing or not Ready within the deadline. +// If the context is cancelled, returns an error immediately. func (k *BaseKubernetesManager) waitForNodesReady(ctx context.Context, nodeNames []string, outputFunc func(string)) error { deadline, ok := ctx.Deadline() if !ok { @@ -626,7 +612,6 @@ func (k *BaseKubernetesManager) waitForNodesReady(ctx context.Context, nodeNames } } - // Report status changes if outputFunc != nil { for _, nodeName := range nodeNames { var currentStatus string diff --git a/pkg/kubernetes/kubernetes_manager_test.go b/pkg/kubernetes/kubernetes_manager_test.go index af85198d1..0ae59b015 100644 --- a/pkg/kubernetes/kubernetes_manager_test.go +++ b/pkg/kubernetes/kubernetes_manager_test.go @@ -203,6 +203,16 @@ func TestBaseKubernetesManager_DeleteKustomization(t *testing.T) { t.Run("Success", func(t *testing.T) { manager := setup(t) + client := NewMockKubernetesClient() + client.DeleteResourceFunc = func(gvr schema.GroupVersionResource, namespace, name string, opts metav1.DeleteOptions) error { + return nil + } + // Mock GetResource to return "not found" immediately to simulate successful deletion + client.GetResourceFunc = func(gvr schema.GroupVersionResource, namespace, name string) (*unstructured.Unstructured, error) { + return nil, fmt.Errorf("the server could not find the requested resource") + } + manager.client = client + err := manager.DeleteKustomization("test-kustomization", "test-namespace") if err != nil { t.Errorf("Expected no error, got %v", err) @@ -236,6 +246,33 @@ func TestBaseKubernetesManager_DeleteKustomization(t *testing.T) { t.Errorf("Expected no error for not found resource, got %v", err) } }) + + t.Run("UsesCorrectDeleteOptions", func(t *testing.T) { + manager := setup(t) + client := NewMockKubernetesClient() + var capturedOptions metav1.DeleteOptions + client.DeleteResourceFunc = func(gvr schema.GroupVersionResource, namespace, name string, opts metav1.DeleteOptions) error { + capturedOptions = opts + return nil + } + // Mock GetResource to return "not found" immediately to simulate successful deletion + client.GetResourceFunc = func(gvr schema.GroupVersionResource, namespace, name string) (*unstructured.Unstructured, error) { + return nil, fmt.Errorf("the server 
could not find the requested resource") + } + manager.client = client + + err := manager.DeleteKustomization("test-kustomization", "test-namespace") + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + + // Verify the correct delete options were used + if capturedOptions.PropagationPolicy == nil { + t.Error("Expected PropagationPolicy to be set") + } else if *capturedOptions.PropagationPolicy != metav1.DeletePropagationBackground { + t.Errorf("Expected PropagationPolicy to be DeletePropagationBackground, got %s", *capturedOptions.PropagationPolicy) + } + }) } func TestBaseKubernetesManager_WaitForKustomizations(t *testing.T) { @@ -476,6 +513,16 @@ func TestBaseKubernetesManager_DeleteNamespace(t *testing.T) { t.Run("Success", func(t *testing.T) { manager := setup(t) + client := NewMockKubernetesClient() + client.DeleteResourceFunc = func(gvr schema.GroupVersionResource, namespace, name string, opts metav1.DeleteOptions) error { + return nil + } + // Mock GetResource to return "not found" immediately to simulate successful deletion + client.GetResourceFunc = func(gvr schema.GroupVersionResource, namespace, name string) (*unstructured.Unstructured, error) { + return nil, fmt.Errorf("the server could not find the requested resource") + } + manager.client = client + err := manager.DeleteNamespace("test-namespace") if err != nil { t.Errorf("Expected no error, got %v", err) @@ -495,6 +542,27 @@ func TestBaseKubernetesManager_DeleteNamespace(t *testing.T) { t.Error("Expected error, got nil") } }) + + t.Run("UsesCorrectDeleteOptions", func(t *testing.T) { + manager := setup(t) + client := NewMockKubernetesClient() + var capturedOptions metav1.DeleteOptions + client.DeleteResourceFunc = func(gvr schema.GroupVersionResource, namespace, name string, opts metav1.DeleteOptions) error { + capturedOptions = opts + return nil + } + manager.client = client + + err := manager.DeleteNamespace("test-namespace") + if err != nil { + t.Errorf("Expected no error, got %v", err) + } + + // Verify the delete options were used (no specific policy required) + if capturedOptions.PropagationPolicy != nil { + t.Errorf("Expected no PropagationPolicy, got %+v", capturedOptions.PropagationPolicy) + } + }) } func TestBaseKubernetesManager_ApplyConfigMap(t *testing.T) { @@ -1472,64 +1540,6 @@ func TestBaseKubernetesManager_ApplyGitRepository(t *testing.T) { }) } -func TestBaseKubernetesManager_WaitForKustomizationsDeleted(t *testing.T) { - setup := func(t *testing.T) *BaseKubernetesManager { - t.Helper() - mocks := setupMocks(t) - manager := NewKubernetesManager(mocks.Injector) - if err := manager.Initialize(); err != nil { - t.Fatalf("Initialize failed: %v", err) - } - // Use shorter timeouts for tests - manager.kustomizationWaitPollInterval = 50 * time.Millisecond - manager.kustomizationReconcileTimeout = 100 * time.Millisecond - manager.kustomizationReconcileSleep = 50 * time.Millisecond - return manager - } - - t.Run("Success", func(t *testing.T) { - manager := setup(t) - client := NewMockKubernetesClient() - client.GetResourceFunc = func(gvr schema.GroupVersionResource, ns, name string) (*unstructured.Unstructured, error) { - return nil, fmt.Errorf("not found") - } - manager.client = client - - err := manager.WaitForKustomizationsDeleted("Waiting for deletion", "k1", "k2") - if err != nil { - t.Errorf("Expected no error, got %v", err) - } - }) - - t.Run("Timeout", func(t *testing.T) { - manager := setup(t) - client := NewMockKubernetesClient() - client.GetResourceFunc = func(gvr 
schema.GroupVersionResource, ns, name string) (*unstructured.Unstructured, error) {
-			return &unstructured.Unstructured{}, nil
-		}
-		manager.client = client
-
-		err := manager.WaitForKustomizationsDeleted("Waiting for deletion", "k1", "k2")
-		if err == nil {
-			t.Error("Expected timeout error, got nil")
-		}
-	})
-
-	t.Run("GetResourceError", func(t *testing.T) {
-		manager := setup(t)
-		client := NewMockKubernetesClient()
-		client.GetResourceFunc = func(gvr schema.GroupVersionResource, ns, name string) (*unstructured.Unstructured, error) {
-			return nil, fmt.Errorf("some transient error")
-		}
-		manager.client = client
-
-		err := manager.WaitForKustomizationsDeleted("Waiting for deletion", "k1", "k2")
-		if err != nil {
-			t.Errorf("Expected no error, got %v", err)
-		}
-	})
-}
-
 func TestBaseKubernetesManager_CheckGitRepositoryStatus(t *testing.T) {
 	t.Run("Success", func(t *testing.T) {
 		manager := func(t *testing.T) *BaseKubernetesManager {
diff --git a/pkg/kubernetes/mock_kubernetes_manager.go b/pkg/kubernetes/mock_kubernetes_manager.go
index c4c3ab870..726ea0c8a 100644
--- a/pkg/kubernetes/mock_kubernetes_manager.go
+++ b/pkg/kubernetes/mock_kubernetes_manager.go
@@ -32,7 +32,6 @@ type MockKubernetesManager struct {
 	SuspendHelmReleaseFunc           func(name, namespace string) error
 	ApplyGitRepositoryFunc           func(repo *sourcev1.GitRepository) error
 	ApplyOCIRepositoryFunc           func(repo *sourcev1.OCIRepository) error
-	WaitForKustomizationsDeletedFunc func(message string, names ...string) error
 	CheckGitRepositoryStatusFunc     func() error
 	WaitForKubernetesHealthyFunc     func(ctx context.Context, endpoint string, outputFunc func(string), nodeNames ...string) error
 	GetNodeReadyStatusFunc           func(ctx context.Context, nodeNames []string) (map[string]bool, error)
@@ -155,13 +154,6 @@ func (m *MockKubernetesManager) ApplyOCIRepository(repo *sourcev1.OCIRepository)
 	return nil
 }

-// WaitForKustomizationsDeleted waits for the specified kustomizations to be deleted.
-func (m *MockKubernetesManager) WaitForKustomizationsDeleted(message string, names ...string) error {
-	if m.WaitForKustomizationsDeletedFunc != nil {
-		return m.WaitForKustomizationsDeletedFunc(message, names...)
-	}
-	return nil
-}
// CheckGitRepositoryStatus checks the status of all GitRepository resources func (m *MockKubernetesManager) CheckGitRepositoryStatus() error { diff --git a/pkg/kubernetes/mock_kubernetes_manager_test.go b/pkg/kubernetes/mock_kubernetes_manager_test.go index 967664b46..fc576d9cc 100644 --- a/pkg/kubernetes/mock_kubernetes_manager_test.go +++ b/pkg/kubernetes/mock_kubernetes_manager_test.go @@ -334,32 +334,6 @@ func TestMockKubernetesManager_ApplyGitRepository(t *testing.T) { }) } -func TestMockKubernetesManager_WaitForKustomizationsDeleted(t *testing.T) { - setup := func(t *testing.T) *MockKubernetesManager { - t.Helper() - return NewMockKubernetesManager(nil) - } - msg := "msg" - name := "n" - - t.Run("FuncSet", func(t *testing.T) { - manager := setup(t) - manager.WaitForKustomizationsDeletedFunc = func(m string, n ...string) error { return fmt.Errorf("err") } - err := manager.WaitForKustomizationsDeleted(msg, name) - if err == nil || err.Error() != "err" { - t.Errorf("Expected error 'err', got %v", err) - } - }) - - t.Run("FuncNotSet", func(t *testing.T) { - manager := setup(t) - err := manager.WaitForKustomizationsDeleted(msg, name) - if err != nil { - t.Errorf("Expected nil, got %v", err) - } - }) -} - func TestMockKubernetesManager_CheckGitRepositoryStatus(t *testing.T) { setup := func(t *testing.T) *MockKubernetesManager { t.Helper() diff --git a/pkg/pipelines/down.go b/pkg/pipelines/down.go index 254f2678f..25b0bc046 100644 --- a/pkg/pipelines/down.go +++ b/pkg/pipelines/down.go @@ -163,13 +163,16 @@ func (p *DownPipeline) Execute(ctx context.Context) error { // Tear down the container runtime if enabled containerRuntimeEnabled := p.configHandler.GetBool("docker.enabled") - if containerRuntimeEnabled { + skipDockerFlag := ctx.Value("skipDocker") + if containerRuntimeEnabled && (skipDockerFlag == nil || !skipDockerFlag.(bool)) { if p.containerRuntime == nil { return fmt.Errorf("No container runtime found") } if err := p.containerRuntime.Down(); err != nil { return fmt.Errorf("Error running container runtime Down command: %w", err) } + } else if skipDockerFlag != nil && skipDockerFlag.(bool) { + fmt.Fprintln(os.Stderr, "Skipping Docker container cleanup (--skip-docker set)") } // Clean up context specific artifacts if --clean flag is set diff --git a/pkg/pipelines/down_test.go b/pkg/pipelines/down_test.go index d13554887..20ab391d0 100644 --- a/pkg/pipelines/down_test.go +++ b/pkg/pipelines/down_test.go @@ -899,6 +899,60 @@ func TestDownPipeline_Execute(t *testing.T) { t.Errorf("Expected error message containing 'Error performing cleanup', got: %v", err) } }) + + t.Run("SkipDockerFlag", func(t *testing.T) { + // Given a down pipeline with skipDocker flag set + pipeline := NewDownPipeline() + mocks := setupDownMocks(t) + err := pipeline.Initialize(mocks.Injector, context.Background()) + if err != nil { + t.Fatalf("Failed to initialize pipeline: %v", err) + } + + // Track method calls + var blueprintDownCalled bool + var stackDownCalled bool + var containerRuntimeDownCalled bool + + mocks.BlueprintHandler.DownFunc = func() error { + blueprintDownCalled = true + return nil + } + mocks.Stack.DownFunc = func() error { + stackDownCalled = true + return nil + } + mocks.ContainerRuntime.DownFunc = func() error { + containerRuntimeDownCalled = true + return nil + } + + // Create context with skipDocker flag + ctx := context.WithValue(context.Background(), "skipDocker", true) + + // When executing the pipeline + err = pipeline.Execute(ctx) + + // Then no error should be returned + if err != nil 
{ + t.Errorf("Expected no error, got %v", err) + } + + // And blueprint down should be called + if !blueprintDownCalled { + t.Error("Expected blueprint down to be called") + } + + // And stack down should be called + if !stackDownCalled { + t.Error("Expected stack down to be called") + } + + // And container runtime down should NOT be called + if containerRuntimeDownCalled { + t.Error("Expected container runtime down to NOT be called") + } + }) } // =============================================================================