diff --git a/cmd/gh-aw/main.go b/cmd/gh-aw/main.go index 0a8ee0843ed..9a06d6aac2f 100644 --- a/cmd/gh-aw/main.go +++ b/cmd/gh-aw/main.go @@ -305,7 +305,8 @@ Examples: gh aw run daily-perf-improver --repeat 3 # Run 3 times total gh aw run daily-perf-improver --enable-if-needed # Enable if disabled, run, then restore state gh aw run daily-perf-improver --auto-merge-prs # Auto-merge any PRs created during execution - gh aw run daily-perf-improver -f name=value -f env=prod # Pass workflow inputs`, + gh aw run daily-perf-improver -f name=value -f env=prod # Pass workflow inputs + gh aw run daily-perf-improver --push # Commit and push workflow files before running`, Args: cobra.MinimumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { repeatCount, _ := cmd.Flags().GetInt("repeat") @@ -316,12 +317,13 @@ Examples: autoMergePRs, _ := cmd.Flags().GetBool("auto-merge-prs") pushSecrets, _ := cmd.Flags().GetBool("use-local-secrets") inputs, _ := cmd.Flags().GetStringArray("raw-field") + push, _ := cmd.Flags().GetBool("push") if err := validateEngine(engineOverride); err != nil { return err } - return cli.RunWorkflowsOnGitHub(cmd.Context(), args, repeatCount, enable, engineOverride, repoOverride, refOverride, autoMergePRs, pushSecrets, inputs, verboseFlag) + return cli.RunWorkflowsOnGitHub(cmd.Context(), args, repeatCount, enable, engineOverride, repoOverride, refOverride, autoMergePRs, pushSecrets, push, inputs, verboseFlag) }, } @@ -512,6 +514,7 @@ Use "` + string(constants.CLIExtensionPrefix) + ` help all" to show help for all runCmd.Flags().Bool("auto-merge-prs", false, "Auto-merge any pull requests created during the workflow execution") runCmd.Flags().Bool("use-local-secrets", false, "Use local environment API key secrets for workflow execution (pushes and cleans up secrets in repository)") runCmd.Flags().StringArrayP("raw-field", "F", []string{}, "Add a string parameter in key=value format (can be used multiple times)") + runCmd.Flags().Bool("push", false, 
"Commit and push workflow files (including transitive imports) before running") // Register completions for run command runCmd.ValidArgsFunction = cli.CompleteWorkflowNames cli.RegisterEngineFlagCompletion(runCmd) diff --git a/pkg/cli/commands_test.go b/pkg/cli/commands_test.go index 45c6550a0f9..d276e353db3 100644 --- a/pkg/cli/commands_test.go +++ b/pkg/cli/commands_test.go @@ -337,13 +337,13 @@ func TestDisableWorkflowsFailureScenarios(t *testing.T) { func TestRunWorkflowOnGitHub(t *testing.T) { // Test with empty workflow name - err := RunWorkflowOnGitHub(context.Background(), "", false, "", "", "", false, false, false, []string{}, false) + err := RunWorkflowOnGitHub(context.Background(), "", false, "", "", "", false, false, false, false, []string{}, false) if err == nil { t.Error("RunWorkflowOnGitHub should return error for empty workflow name") } // Test with nonexistent workflow (this will fail but gracefully) - err = RunWorkflowOnGitHub(context.Background(), "nonexistent-workflow", false, "", "", "", false, false, false, []string{}, false) + err = RunWorkflowOnGitHub(context.Background(), "nonexistent-workflow", false, "", "", "", false, false, false, false, []string{}, false) if err == nil { t.Error("RunWorkflowOnGitHub should return error for non-existent workflow") } @@ -351,25 +351,25 @@ func TestRunWorkflowOnGitHub(t *testing.T) { func TestRunWorkflowsOnGitHub(t *testing.T) { // Test with empty workflow list - err := RunWorkflowsOnGitHub(context.Background(), []string{}, 0, false, "", "", "", false, false, []string{}, false) + err := RunWorkflowsOnGitHub(context.Background(), []string{}, 0, false, "", "", "", false, false, false, []string{}, false) if err == nil { t.Error("RunWorkflowsOnGitHub should return error for empty workflow list") } // Test with workflow list containing empty name - err = RunWorkflowsOnGitHub(context.Background(), []string{"valid-workflow", ""}, 0, false, "", "", "", false, false, []string{}, false) + err = 
RunWorkflowsOnGitHub(context.Background(), []string{"valid-workflow", ""}, 0, false, "", "", "", false, false, false, []string{}, false) if err == nil { t.Error("RunWorkflowsOnGitHub should return error for workflow list containing empty name") } // Test with nonexistent workflows (this will fail but gracefully) - err = RunWorkflowsOnGitHub(context.Background(), []string{"nonexistent-workflow1", "nonexistent-workflow2"}, 0, false, "", "", "", false, false, []string{}, false) + err = RunWorkflowsOnGitHub(context.Background(), []string{"nonexistent-workflow1", "nonexistent-workflow2"}, 0, false, "", "", "", false, false, false, []string{}, false) if err == nil { t.Error("RunWorkflowsOnGitHub should return error for non-existent workflows") } // Test with negative repeat seconds (should work as 0) - err = RunWorkflowsOnGitHub(context.Background(), []string{"nonexistent-workflow"}, -1, false, "", "", "", false, false, []string{}, false) + err = RunWorkflowsOnGitHub(context.Background(), []string{"nonexistent-workflow"}, -1, false, "", "", "", false, false, false, []string{}, false) if err == nil { t.Error("RunWorkflowsOnGitHub should return error for non-existent workflow regardless of repeat value") } @@ -427,10 +427,10 @@ Test workflow for command existence.` {func() error { return EnableWorkflows("nonexistent") }, true, "EnableWorkflows"}, // Should now error when no workflows found to enable {func() error { return DisableWorkflows("nonexistent") }, true, "DisableWorkflows"}, // Should now also error when no workflows found to disable {func() error { - return RunWorkflowOnGitHub(context.Background(), "", false, "", "", "", false, false, false, []string{}, false) + return RunWorkflowOnGitHub(context.Background(), "", false, "", "", "", false, false, false, false, []string{}, false) }, true, "RunWorkflowOnGitHub"}, // Should error with empty workflow name {func() error { - return RunWorkflowsOnGitHub(context.Background(), []string{}, 0, false, "", "", "", false, 
false, []string{}, false) + return RunWorkflowsOnGitHub(context.Background(), []string{}, 0, false, "", "", "", false, false, false, []string{}, false) }, true, "RunWorkflowsOnGitHub"}, // Should error with empty workflow list } @@ -1078,13 +1078,13 @@ func TestCalculateTimeRemaining(t *testing.T) { func TestRunWorkflowOnGitHubWithEnable(t *testing.T) { // Test with enable flag enabled (should not error for basic validation) - err := RunWorkflowOnGitHub(context.Background(), "nonexistent-workflow", true, "", "", "", false, false, false, []string{}, false) + err := RunWorkflowOnGitHub(context.Background(), "nonexistent-workflow", true, "", "", "", false, false, false, false, []string{}, false) if err == nil { t.Error("RunWorkflowOnGitHub should return error for non-existent workflow even with enable flag") } // Test with empty workflow name and enable flag - err = RunWorkflowOnGitHub(context.Background(), "", true, "", "", "", false, false, false, []string{}, false) + err = RunWorkflowOnGitHub(context.Background(), "", true, "", "", "", false, false, false, false, []string{}, false) if err == nil { t.Error("RunWorkflowOnGitHub should return error for empty workflow name regardless of enable flag") } diff --git a/pkg/cli/context_cancellation_test.go b/pkg/cli/context_cancellation_test.go index 74290a6728a..b588bac07f2 100644 --- a/pkg/cli/context_cancellation_test.go +++ b/pkg/cli/context_cancellation_test.go @@ -15,7 +15,7 @@ func TestRunWorkflowOnGitHubWithCancellation(t *testing.T) { cancel() // Try to run a workflow with a cancelled context - err := RunWorkflowOnGitHub(ctx, "test-workflow", false, "", "", "", false, false, false, []string{}, false) + err := RunWorkflowOnGitHub(ctx, "test-workflow", false, "", "", "", false, false, false, false, []string{}, false) // Should return context.Canceled error assert.ErrorIs(t, err, context.Canceled, "Should return context.Canceled error when context is cancelled") @@ -28,7 +28,7 @@ func 
TestRunWorkflowsOnGitHubWithCancellation(t *testing.T) { cancel() // Try to run workflows with a cancelled context - err := RunWorkflowsOnGitHub(ctx, []string{"test-workflow"}, 0, false, "", "", "", false, false, []string{}, false) + err := RunWorkflowsOnGitHub(ctx, []string{"test-workflow"}, 0, false, "", "", "", false, false, false, []string{}, false) // Should return context.Canceled error assert.ErrorIs(t, err, context.Canceled, "Should return context.Canceled error when context is cancelled") @@ -96,7 +96,7 @@ func TestRunWorkflowsOnGitHubCancellationDuringExecution(t *testing.T) { // Try to run multiple workflows that would take a long time // This should fail validation before timeout, but if it gets past validation, // it should respect the context cancellation - err := RunWorkflowsOnGitHub(ctx, []string{"nonexistent-workflow-1", "nonexistent-workflow-2"}, 0, false, "", "", "", false, false, []string{}, false) + err := RunWorkflowsOnGitHub(ctx, []string{"nonexistent-workflow-1", "nonexistent-workflow-2"}, 0, false, "", "", "", false, false, false, []string{}, false) // Should return an error (either validation error or context error) assert.Error(t, err, "Should return an error") diff --git a/pkg/cli/interactive.go b/pkg/cli/interactive.go index f1bdd91fe69..cb2794c1fcd 100644 --- a/pkg/cli/interactive.go +++ b/pkg/cli/interactive.go @@ -30,13 +30,6 @@ var commonWorkflowNames = []string{ "documentation-check", } -// isAccessibleMode detects if accessibility mode should be enabled based on environment variables -func isAccessibleMode() bool { - return os.Getenv("ACCESSIBLE") != "" || - os.Getenv("TERM") == "dumb" || - os.Getenv("NO_COLOR") != "" -} - // InteractiveWorkflowBuilder collects user input to build an agentic workflow type InteractiveWorkflowBuilder struct { WorkflowName string @@ -102,7 +95,7 @@ func (b *InteractiveWorkflowBuilder) promptForWorkflowName() error { Value(&b.WorkflowName). 
Validate(ValidateWorkflowName), ), - ).WithAccessible(isAccessibleMode()) + ).WithAccessible(console.IsAccessibleMode()) return form.Run() } @@ -225,7 +218,7 @@ func (b *InteractiveWorkflowBuilder) promptForConfiguration() error { ). Title("Instructions"). Description("Describe what you want this workflow to accomplish"), - ).WithAccessible(isAccessibleMode()) + ).WithAccessible(console.IsAccessibleMode()) if err := form.Run(); err != nil { return err @@ -268,7 +261,7 @@ func (b *InteractiveWorkflowBuilder) generateWorkflow(force bool) error { Negative("No, cancel"). Value(&overwrite), ), - ).WithAccessible(isAccessibleMode()) + ).WithAccessible(console.IsAccessibleMode()) if err := confirmForm.Run(); err != nil { return fmt.Errorf("confirmation failed: %w", err) diff --git a/pkg/cli/interactive_test.go b/pkg/cli/interactive_test.go index 6b5a513da7c..d35ca48baef 100644 --- a/pkg/cli/interactive_test.go +++ b/pkg/cli/interactive_test.go @@ -4,6 +4,8 @@ import ( "os" "strings" "testing" + + "github.com/githubnext/gh-aw/pkg/console" ) func TestValidateWorkflowName_Integration(t *testing.T) { @@ -225,7 +227,7 @@ func TestIsAccessibleMode(t *testing.T) { os.Unsetenv("NO_COLOR") } - result := isAccessibleMode() + result := console.IsAccessibleMode() // Restore original values if origAccessible != "" { @@ -245,7 +247,7 @@ func TestIsAccessibleMode(t *testing.T) { } if result != tt.expected { - t.Errorf("isAccessibleMode() with ACCESSIBLE=%q TERM=%q NO_COLOR=%q = %v, want %v", + t.Errorf("console.IsAccessibleMode() with ACCESSIBLE=%q TERM=%q NO_COLOR=%q = %v, want %v", tt.accessible, tt.term, tt.noColor, result, tt.expected) } }) diff --git a/pkg/cli/run_command.go b/pkg/cli/run_command.go index 77d299d877f..66f2d2d39be 100644 --- a/pkg/cli/run_command.go +++ b/pkg/cli/run_command.go @@ -22,8 +22,8 @@ import ( var runLog = logger.New("cli:run_command") // RunWorkflowOnGitHub runs an agentic workflow on GitHub Actions -func RunWorkflowOnGitHub(ctx context.Context, 
workflowIdOrName string, enable bool, engineOverride string, repoOverride string, refOverride string, autoMergePRs bool, pushSecrets bool, waitForCompletion bool, inputs []string, verbose bool) error { - runLog.Printf("Starting workflow run: workflow=%s, enable=%v, engineOverride=%s, repo=%s, ref=%s, wait=%v, inputs=%v", workflowIdOrName, enable, engineOverride, repoOverride, refOverride, waitForCompletion, inputs) +func RunWorkflowOnGitHub(ctx context.Context, workflowIdOrName string, enable bool, engineOverride string, repoOverride string, refOverride string, autoMergePRs bool, pushSecrets bool, push bool, waitForCompletion bool, inputs []string, verbose bool) error { + runLog.Printf("Starting workflow run: workflow=%s, enable=%v, engineOverride=%s, repo=%s, ref=%s, push=%v, wait=%v, inputs=%v", workflowIdOrName, enable, engineOverride, repoOverride, refOverride, push, waitForCompletion, inputs) // Check context cancellation at the start select { @@ -225,6 +225,46 @@ func RunWorkflowOnGitHub(ctx context.Context, workflowIdOrName string, enable bo fmt.Printf("Using lock file: %s\n", lockFileName) } + // Check for missing or outdated lock files (when not using --push) + if !push && repoOverride == "" { + workflowMarkdownPath := strings.TrimSuffix(lockFilePath, ".lock.yml") + ".md" + if status, err := checkLockFileStatus(workflowMarkdownPath); err == nil { + if status.Missing { + fmt.Fprintln(os.Stderr, console.FormatWarningMessage("Lock file is missing")) + fmt.Fprintln(os.Stderr, console.FormatInfoMessage(fmt.Sprintf("Run 'gh aw run %s --push' to automatically compile and push the lock file", workflowIdOrName))) + } else if status.Outdated { + fmt.Fprintln(os.Stderr, console.FormatWarningMessage("Lock file is outdated (workflow file is newer)")) + fmt.Fprintln(os.Stderr, console.FormatInfoMessage(fmt.Sprintf("Run 'gh aw run %s --push' to automatically compile and push the lock file", workflowIdOrName))) + } + } + } + + // Handle --push flag: commit and push 
workflow files before running + if push { + // Only valid for local workflows + if repoOverride != "" { + return fmt.Errorf("--push flag is only supported for local workflows, not remote repositories") + } + + if verbose { + fmt.Fprintln(os.Stderr, console.FormatInfoMessage("Collecting workflow files for push...")) + } + + // Collect the workflow .md file, .lock.yml file, and transitive imports + workflowMarkdownPath := strings.TrimSuffix(lockFilePath, ".lock.yml") + ".md" + files, err := collectWorkflowFiles(workflowMarkdownPath, verbose) + if err != nil { + return fmt.Errorf("failed to collect workflow files: %w", err) + } + + // Commit and push the files (includes branch verification if --ref is specified) + if err := pushWorkflowFiles(workflowIdOrName, files, refOverride, verbose); err != nil { + return fmt.Errorf("failed to push workflow files: %w", err) + } + + fmt.Println(console.FormatSuccessMessage(fmt.Sprintf("Successfully pushed %d file(s) for workflow %s", len(files), workflowIdOrName))) + } + // Handle secret pushing if requested var secretTracker *TrialSecretTracker if pushSecrets { @@ -472,7 +512,7 @@ func RunWorkflowOnGitHub(ctx context.Context, workflowIdOrName string, enable bo } // RunWorkflowsOnGitHub runs multiple agentic workflows on GitHub Actions, optionally repeating a specified number of times -func RunWorkflowsOnGitHub(ctx context.Context, workflowNames []string, repeatCount int, enable bool, engineOverride string, repoOverride string, refOverride string, autoMergePRs bool, pushSecrets bool, inputs []string, verbose bool) error { +func RunWorkflowsOnGitHub(ctx context.Context, workflowNames []string, repeatCount int, enable bool, engineOverride string, repoOverride string, refOverride string, autoMergePRs bool, pushSecrets bool, push bool, inputs []string, verbose bool) error { if len(workflowNames) == 0 { return fmt.Errorf("at least one workflow name or ID is required") } @@ -535,7 +575,7 @@ func RunWorkflowsOnGitHub(ctx context.Context, 
workflowNames []string, repeatCou fmt.Println(console.FormatProgressMessage(fmt.Sprintf("Running workflow %d/%d: %s", i+1, len(workflowNames), workflowName))) } - if err := RunWorkflowOnGitHub(ctx, workflowName, enable, engineOverride, repoOverride, refOverride, autoMergePRs, pushSecrets, waitForCompletion, inputs, verbose); err != nil { + if err := RunWorkflowOnGitHub(ctx, workflowName, enable, engineOverride, repoOverride, refOverride, autoMergePRs, pushSecrets, push, waitForCompletion, inputs, verbose); err != nil { return fmt.Errorf("failed to run workflow '%s': %w", workflowName, err) } diff --git a/pkg/cli/run_push.go b/pkg/cli/run_push.go new file mode 100644 index 00000000000..f4a5ebb0540 --- /dev/null +++ b/pkg/cli/run_push.go @@ -0,0 +1,586 @@ +package cli + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + + "github.com/githubnext/gh-aw/pkg/console" + "github.com/githubnext/gh-aw/pkg/logger" + "github.com/githubnext/gh-aw/pkg/parser" +) + +var runPushLog = logger.New("cli:run_push") + +// collectWorkflowFiles collects the workflow .md file, its corresponding .lock.yml file, +// and the transitive closure of all imported files +func collectWorkflowFiles(workflowPath string, verbose bool) ([]string, error) { + runPushLog.Printf("Collecting files for workflow: %s", workflowPath) + + files := make(map[string]bool) // Use map to avoid duplicates + visited := make(map[string]bool) + + // Get absolute path for the workflow + absWorkflowPath, err := filepath.Abs(workflowPath) + if err != nil { + runPushLog.Printf("Failed to get absolute path for %s: %v", workflowPath, err) + return nil, fmt.Errorf("failed to get absolute path for workflow: %w", err) + } + runPushLog.Printf("Resolved absolute workflow path: %s", absWorkflowPath) + + // Add the workflow .md file + files[absWorkflowPath] = true + runPushLog.Printf("Added workflow file: %s", absWorkflowPath) + + // Check if lock file needs recompilation + lockFilePath := 
strings.TrimSuffix(absWorkflowPath, ".md") + ".lock.yml" + runPushLog.Printf("Checking lock file: %s", lockFilePath) + needsRecompile := false + + if lockStat, err := os.Stat(lockFilePath); err == nil { + runPushLog.Printf("Lock file exists: %s", lockFilePath) + // Lock file exists - check if it's outdated + if mdStat, err := os.Stat(absWorkflowPath); err == nil { + runPushLog.Printf("Comparing modification times - md: %v, lock: %v", mdStat.ModTime(), lockStat.ModTime()) + if mdStat.ModTime().After(lockStat.ModTime()) { + needsRecompile = true + runPushLog.Printf("Lock file is outdated (md: %v, lock: %v)", mdStat.ModTime(), lockStat.ModTime()) + if verbose { + fmt.Fprintln(os.Stderr, console.FormatInfoMessage("Detected outdated lock file, recompiling workflow...")) + } + } else { + runPushLog.Printf("Lock file is up-to-date") + } + } + } else if os.IsNotExist(err) { + // Lock file doesn't exist - needs compilation + needsRecompile = true + runPushLog.Printf("Lock file not found: %s", lockFilePath) + if verbose { + fmt.Fprintln(os.Stderr, console.FormatInfoMessage("Lock file not found, compiling workflow...")) + } + } else { + runPushLog.Printf("Error checking lock file: %v", err) + } + + // Recompile if needed + if needsRecompile { + runPushLog.Printf("Recompilation needed for %s", absWorkflowPath) + if err := recompileWorkflow(absWorkflowPath, verbose); err != nil { + runPushLog.Printf("Failed to recompile workflow: %v", err) + return nil, fmt.Errorf("failed to recompile workflow: %w", err) + } + if verbose { + fmt.Fprintln(os.Stderr, console.FormatSuccessMessage("Workflow compiled successfully")) + } + runPushLog.Printf("Recompilation completed successfully") + } else { + runPushLog.Printf("Recompilation not needed") + } + + // Add the corresponding .lock.yml file + if _, err := os.Stat(lockFilePath); err == nil { + files[lockFilePath] = true + runPushLog.Printf("Added lock file: %s", lockFilePath) + } else if verbose { + runPushLog.Printf("Lock file not found 
after compilation: %s", lockFilePath) + fmt.Fprintln(os.Stderr, console.FormatWarningMessage(fmt.Sprintf("Lock file not found after compilation: %s", lockFilePath))) + } + + // Collect transitive closure of imported files + runPushLog.Printf("Starting import collection for %s", absWorkflowPath) + if err := collectImports(absWorkflowPath, files, visited, verbose); err != nil { + runPushLog.Printf("Failed to collect imports: %v", err) + return nil, fmt.Errorf("failed to collect imports: %w", err) + } + runPushLog.Printf("Import collection completed") + + // Convert map to slice + result := make([]string, 0) + for file := range files { + result = append(result, file) + } + + // Sort files for stable output + sort.Strings(result) + runPushLog.Printf("Sorted %d files for stable output", len(result)) + + runPushLog.Printf("Collected %d files total", len(result)) + return result, nil +} + +// recompileWorkflow compiles a workflow using CompileWorkflows +func recompileWorkflow(workflowPath string, verbose bool) error { + runPushLog.Printf("Recompiling workflow: %s", workflowPath) + + config := CompileConfig{ + MarkdownFiles: []string{workflowPath}, + Verbose: verbose, + EngineOverride: "", + Validate: true, + Watch: false, + WorkflowDir: "", + SkipInstructions: false, + NoEmit: false, + Purge: false, + TrialMode: false, + TrialLogicalRepoSlug: "", + Strict: false, + } + + runPushLog.Printf("Compilation config: Validate=%v, NoEmit=%v", config.Validate, config.NoEmit) + + // Use background context for compilation + ctx := context.Background() + runPushLog.Printf("Starting compilation with CompileWorkflows") + if _, err := CompileWorkflows(ctx, config); err != nil { + runPushLog.Printf("Compilation failed: %v", err) + return fmt.Errorf("compilation failed: %w", err) + } + + runPushLog.Printf("Successfully recompiled workflow: %s", workflowPath) + return nil +} + +// LockFileStatus describes the state of a workflow's lock file (whether it is missing or outdated) +type LockFileStatus 
struct { + Missing bool + Outdated bool + LockPath string +} + +// checkLockFileStatus checks the status of a workflow's lock file +func checkLockFileStatus(workflowPath string) (*LockFileStatus, error) { + runPushLog.Printf("Checking lock file status for: %s", workflowPath) + + // Get absolute path for the workflow + absWorkflowPath, err := filepath.Abs(workflowPath) + if err != nil { + runPushLog.Printf("Failed to get absolute path for %s: %v", workflowPath, err) + return nil, fmt.Errorf("failed to get absolute path for workflow: %w", err) + } + runPushLog.Printf("Resolved absolute path: %s", absWorkflowPath) + + lockFilePath := strings.TrimSuffix(absWorkflowPath, ".md") + ".lock.yml" + runPushLog.Printf("Expected lock file path: %s", lockFilePath) + status := &LockFileStatus{ + LockPath: lockFilePath, + } + + // Check if lock file exists + lockStat, err := os.Stat(lockFilePath) + if err != nil { + if os.IsNotExist(err) { + status.Missing = true + runPushLog.Printf("Lock file missing: %s", lockFilePath) + return status, nil + } + runPushLog.Printf("Error stating lock file: %v", err) + return nil, fmt.Errorf("failed to stat lock file: %w", err) + } + runPushLog.Printf("Lock file exists: %s (modtime: %v)", lockFilePath, lockStat.ModTime()) + + // Lock file exists - check if it's outdated + mdStat, err := os.Stat(absWorkflowPath) + if err != nil { + runPushLog.Printf("Error stating workflow file: %v", err) + return nil, fmt.Errorf("failed to stat workflow file: %w", err) + } + runPushLog.Printf("Workflow file modtime: %v", mdStat.ModTime()) + + if mdStat.ModTime().After(lockStat.ModTime()) { + status.Outdated = true + runPushLog.Printf("Lock file outdated (md: %v, lock: %v)", mdStat.ModTime(), lockStat.ModTime()) + } else { + runPushLog.Printf("Lock file is up-to-date") + } + + return status, nil +} + +// collectImports recursively collects all imported files (transitive closure) +func collectImports(workflowPath string, files map[string]bool, visited 
map[string]bool, verbose bool) error { + // Avoid processing the same file multiple times + if visited[workflowPath] { + runPushLog.Printf("Skipping already visited file: %s", workflowPath) + return nil + } + visited[workflowPath] = true + + runPushLog.Printf("Processing imports for: %s", workflowPath) + + // Read the workflow file + content, err := os.ReadFile(workflowPath) + if err != nil { + runPushLog.Printf("Failed to read workflow file %s: %v", workflowPath, err) + return fmt.Errorf("failed to read workflow file %s: %w", workflowPath, err) + } + runPushLog.Printf("Read %d bytes from %s", len(content), workflowPath) + + // Extract frontmatter to get imports field + result, err := parser.ExtractFrontmatterFromContent(string(content)) + if err != nil { + // No frontmatter is okay - might be a simple file + runPushLog.Printf("No frontmatter in %s, skipping imports extraction: %v", workflowPath, err) + return nil + } + runPushLog.Printf("Extracted frontmatter from %s", workflowPath) + + // Get imports from frontmatter + importsField, exists := result.Frontmatter["imports"] + if !exists { + runPushLog.Printf("No imports field in %s", workflowPath) + return nil + } + runPushLog.Printf("Found imports field in %s", workflowPath) + + // Parse imports field - can be array of strings or objects with path + workflowDir := filepath.Dir(workflowPath) + runPushLog.Printf("Workflow directory: %s", workflowDir) + var imports []string + + switch v := importsField.(type) { + case []any: + runPushLog.Printf("Parsing imports as []any with %d items", len(v)) + for i, item := range v { + switch importItem := item.(type) { + case string: + // Simple string import + runPushLog.Printf("Import %d: string format: %s", i, importItem) + imports = append(imports, importItem) + case map[string]any: + // Object import with path field + if pathValue, hasPath := importItem["path"]; hasPath { + if pathStr, ok := pathValue.(string); ok { + runPushLog.Printf("Import %d: object format with path: 
%s", i, pathStr) + imports = append(imports, pathStr) + } else { + runPushLog.Printf("Import %d: object has path but not string type", i) + } + } else { + runPushLog.Printf("Import %d: object missing path field", i) + } + default: + runPushLog.Printf("Import %d: unknown type: %T", i, importItem) + } + } + case []string: + runPushLog.Printf("Parsing imports as []string with %d items", len(v)) + imports = v + default: + runPushLog.Printf("Imports field has unexpected type: %T", v) + } + + runPushLog.Printf("Found %d imports in %s", len(imports), workflowPath) + + // Process each import + for i, importPath := range imports { + runPushLog.Printf("Processing import %d/%d: %s", i+1, len(imports), importPath) + + // Resolve the import path + resolvedPath := resolveImportPathLocal(importPath, workflowDir) + if resolvedPath == "" { + runPushLog.Printf("Could not resolve import path: %s", importPath) + if verbose { + fmt.Fprintln(os.Stderr, console.FormatWarningMessage(fmt.Sprintf("Could not resolve import: %s", importPath))) + } + continue + } + runPushLog.Printf("Resolved import path: %s -> %s", importPath, resolvedPath) + + // Get absolute path + var absImportPath string + if filepath.IsAbs(resolvedPath) { + absImportPath = resolvedPath + runPushLog.Printf("Import path is absolute: %s", absImportPath) + } else { + absImportPath = filepath.Join(workflowDir, resolvedPath) + runPushLog.Printf("Joined relative path: %s + %s = %s", workflowDir, resolvedPath, absImportPath) + } + + // Check if file exists + if _, err := os.Stat(absImportPath); err != nil { + runPushLog.Printf("Import file not found: %s (error: %v)", absImportPath, err) + if verbose { + fmt.Fprintln(os.Stderr, console.FormatWarningMessage(fmt.Sprintf("Import file not found: %s", absImportPath))) + } + continue + } + runPushLog.Printf("Import file exists: %s", absImportPath) + + // Add the import file + files[absImportPath] = true + runPushLog.Printf("Added import file: %s", absImportPath) + + // Recursively 
collect imports from this file + runPushLog.Printf("Recursively collecting imports from: %s", absImportPath) + if err := collectImports(absImportPath, files, visited, verbose); err != nil { + runPushLog.Printf("Failed to recursively collect imports from %s: %v", absImportPath, err) + return err + } + } + + runPushLog.Printf("Finished processing imports for: %s", workflowPath) + return nil +} + +// resolveImportPathLocal is a local version of resolveImportPath for push functionality +// This is needed to avoid circular dependencies with imports.go +func resolveImportPathLocal(importPath, baseDir string) string { + runPushLog.Printf("Resolving import path: %s (baseDir: %s)", importPath, baseDir) + + // Handle section references (file.md#Section) - strip the section part + if strings.Contains(importPath, "#") { + parts := strings.SplitN(importPath, "#", 2) + runPushLog.Printf("Stripping section reference: %s -> %s", importPath, parts[0]) + importPath = parts[0] + } + + // Skip workflowspec format imports (owner/repo/path@sha) + if strings.Contains(importPath, "@") || isWorkflowSpecFormatLocal(importPath) { + runPushLog.Printf("Skipping workflowspec format import: %s", importPath) + return "" + } + + // If the import path is absolute (starts with /), use it relative to repo root + if strings.HasPrefix(importPath, "/") { + runPushLog.Printf("Import path is absolute (starts with /): %s", importPath) + // Find git root + gitRoot, err := findGitRoot() + if err != nil { + runPushLog.Printf("Failed to find git root: %v", err) + return "" + } + resolved := filepath.Join(gitRoot, strings.TrimPrefix(importPath, "/")) + runPushLog.Printf("Resolved absolute import: %s (git root: %s)", resolved, gitRoot) + return resolved + } + + // Otherwise, resolve relative to the workflow file's directory + resolved := filepath.Join(baseDir, importPath) + runPushLog.Printf("Resolved relative import: %s", resolved) + return resolved +} + +// isWorkflowSpecFormatLocal is a local version of 
isWorkflowSpecFormat for push functionality +// This is duplicated from imports.go to avoid circular dependencies +func isWorkflowSpecFormatLocal(path string) bool { + runPushLog.Printf("Checking if workflowspec format: %s", path) + + // Check if it contains @ (ref separator) or looks like owner/repo/path + if strings.Contains(path, "@") { + runPushLog.Printf("Path contains @ - workflowspec format: %s", path) + return true + } + + // Remove section reference if present + cleanPath := path + if idx := strings.Index(path, "#"); idx != -1 { + cleanPath = path[:idx] + runPushLog.Printf("Removed section reference: %s -> %s", path, cleanPath) + } + + // Check if it has at least 3 parts and doesn't start with . or / + parts := strings.Split(cleanPath, "/") + if len(parts) >= 3 && !strings.HasPrefix(cleanPath, ".") && !strings.HasPrefix(cleanPath, "/") { + runPushLog.Printf("Path has %d parts and matches owner/repo/path format - workflowspec format: %s", len(parts), path) + return true + } + + runPushLog.Printf("Path is not workflowspec format: %s", path) + return false +} + +// pushWorkflowFiles commits and pushes the workflow files to the repository +func pushWorkflowFiles(workflowName string, files []string, refOverride string, verbose bool) error { + runPushLog.Printf("Pushing %d files for workflow: %s", len(files), workflowName) + runPushLog.Printf("Files to push: %v", files) + + if verbose { + fmt.Fprintln(os.Stderr, console.FormatInfoMessage(fmt.Sprintf("Staging %d files for commit", len(files)))) + for _, file := range files { + fmt.Fprintf(os.Stderr, " - %s\n", file) + } + } + + // Stage all files + gitArgs := append([]string{"add"}, files...) + runPushLog.Printf("Executing git command: git %v", gitArgs) + cmd := exec.Command("git", gitArgs...) 
+ if output, err := cmd.CombinedOutput(); err != nil { + runPushLog.Printf("Failed to stage files: %v, output: %s", err, string(output)) + return fmt.Errorf("failed to stage files: %w\nOutput: %s", err, string(output)) + } + runPushLog.Printf("Successfully staged %d files", len(files)) + + if verbose { + fmt.Fprintln(os.Stderr, console.FormatInfoMessage("Files staged successfully")) + } + + // Check if there are any staged files in git (after we've staged our files) + runPushLog.Printf("Checking staged files with git diff --cached --name-only") + statusCmd := exec.Command("git", "diff", "--cached", "--name-only") + statusOutput, err := statusCmd.CombinedOutput() + if err != nil { + runPushLog.Printf("Failed to check git status: %v, output: %s", err, string(statusOutput)) + return fmt.Errorf("failed to check git status: %w\nOutput: %s", err, string(statusOutput)) + } + runPushLog.Printf("Git status output: %s", string(statusOutput)) + + // Check if there are no staged changes (nothing to commit) + if len(strings.TrimSpace(string(statusOutput))) == 0 { + runPushLog.Printf("No staged changes detected") + if verbose { + fmt.Fprintln(os.Stderr, console.FormatInfoMessage("No changes to commit")) + } + runPushLog.Print("No changes to commit") + return nil + } + + // Now that we know there are changes to commit, check that current branch matches --ref value if specified + // This happens after we've determined there are actual changes, so we don't fail unnecessarily + if refOverride != "" { + runPushLog.Printf("Checking if current branch matches --ref value: %s", refOverride) + currentBranch, err := getCurrentBranch() + if err != nil { + runPushLog.Printf("Failed to determine current branch: %v", err) + return fmt.Errorf("failed to determine current branch: %w", err) + } + runPushLog.Printf("Current branch: %s", currentBranch) + + if currentBranch != refOverride { + runPushLog.Printf("Current branch (%s) does not match --ref value (%s)", currentBranch, refOverride) + 
return fmt.Errorf("--push requires the current branch (%s) to match the --ref value (%s). Switching branches is not supported. Please checkout the target branch first", currentBranch, refOverride) + } + + runPushLog.Printf("Current branch matches --ref value: %s", currentBranch) + if verbose { + fmt.Fprintln(os.Stderr, console.FormatInfoMessage(fmt.Sprintf("Verified current branch matches --ref: %s", currentBranch))) + } + } + + // Get the list of staged files + stagedFiles := strings.Split(strings.TrimSpace(string(statusOutput)), "\n") + runPushLog.Printf("Found %d staged files: %v", len(stagedFiles), stagedFiles) + + // Check if there are staged files beyond what we just staged + // Convert our files list to a map for quick lookup + runPushLog.Printf("Building map of our files for comparison") + ourFiles := make(map[string]bool) + for _, file := range files { + // Normalize the path + absPath, err := filepath.Abs(file) + if err == nil { + ourFiles[absPath] = true + runPushLog.Printf("Added to our files map: %s (absolute: %s)", file, absPath) + } else { + runPushLog.Printf("Failed to get absolute path for %s: %v", file, err) + } + ourFiles[file] = true + runPushLog.Printf("Added to our files map: %s", file) + } + + // Check if there are any staged files that aren't in our list + runPushLog.Printf("Checking for extra staged files not in our list") + var extraStagedFiles []string + for _, stagedFile := range stagedFiles { + runPushLog.Printf("Checking staged file: %s", stagedFile) + // Try both absolute and relative paths + absStagedPath, err := filepath.Abs(stagedFile) + if err == nil && ourFiles[absStagedPath] { + runPushLog.Printf("Staged file %s matches our file %s (absolute)", stagedFile, absStagedPath) + continue + } + if ourFiles[stagedFile] { + runPushLog.Printf("Staged file %s matches our file (relative)", stagedFile) + continue + } + runPushLog.Printf("Extra staged file detected: %s", stagedFile) + extraStagedFiles = append(extraStagedFiles, stagedFile) + 
} + + // If there are extra staged files that we didn't stage, give up + if len(extraStagedFiles) > 0 { + runPushLog.Printf("Found %d extra staged files not in our list, refusing to proceed: %v", len(extraStagedFiles), extraStagedFiles) + + fmt.Fprintln(os.Stderr, "") + fmt.Fprintln(os.Stderr, console.FormatErrorMessage("Cannot proceed: there are already staged files in git that are not part of this workflow")) + fmt.Fprintln(os.Stderr, "") + fmt.Fprintln(os.Stderr, console.FormatInfoMessage("Extra staged files:")) + for _, file := range extraStagedFiles { + fmt.Fprintf(os.Stderr, " - %s\n", file) + } + fmt.Fprintln(os.Stderr, "") + fmt.Fprintln(os.Stderr, console.FormatInfoMessage("Please commit or unstage these files before using --push")) + fmt.Fprintln(os.Stderr, "") + + return fmt.Errorf("git has staged files not part of workflow - commit or unstage them before using --push") + } + runPushLog.Printf("No extra staged files detected - all staged files are part of our workflow") + + // Create commit message + commitMessage := fmt.Sprintf("Updated agentic workflow %s", workflowName) + runPushLog.Printf("Creating commit with message: %s", commitMessage) + + // Show what will be committed and ask for confirmation using console helper + fmt.Fprintln(os.Stderr, "") + fmt.Fprintln(os.Stderr, console.FormatInfoMessage("Ready to commit and push the following files:")) + for _, file := range files { + fmt.Fprintf(os.Stderr, " - %s\n", file) + } + fmt.Fprintln(os.Stderr, "") + fmt.Fprintf(os.Stderr, console.FormatInfoMessage("Commit message: %s\n"), commitMessage) + fmt.Fprintln(os.Stderr, "") + + // Ask for confirmation using console helper + runPushLog.Printf("Requesting user confirmation for commit and push") + confirmed, err := console.ConfirmAction( + "Do you want to commit and push these changes?", + "Yes, commit and push", + "No, cancel", + ) + if err != nil { + runPushLog.Printf("Confirmation failed: %v", err) + return fmt.Errorf("confirmation failed: %w", err) + } 
+ + if !confirmed { + runPushLog.Print("Push cancelled by user") + return fmt.Errorf("push cancelled by user") + } + runPushLog.Printf("User confirmed - proceeding with commit and push") + + // Commit the changes + runPushLog.Printf("Executing git commit with message: %s", commitMessage) + cmd = exec.Command("git", "commit", "-m", commitMessage) + if output, err := cmd.CombinedOutput(); err != nil { + runPushLog.Printf("Failed to commit: %v, output: %s", err, string(output)) + return fmt.Errorf("failed to commit changes: %w\nOutput: %s", err, string(output)) + } + runPushLog.Printf("Commit successful") + + if verbose { + fmt.Fprintln(os.Stderr, console.FormatSuccessMessage("Changes committed successfully")) + } + + // Push the changes + runPushLog.Print("Pushing changes to remote") + runPushLog.Printf("Executing git push") + cmd = exec.Command("git", "push") + if output, err := cmd.CombinedOutput(); err != nil { + runPushLog.Printf("Failed to push: %v, output: %s", err, string(output)) + return fmt.Errorf("failed to push changes: %w\nOutput: %s", err, string(output)) + } + runPushLog.Printf("Push to remote successful") + + if verbose { + fmt.Fprintln(os.Stderr, console.FormatSuccessMessage("Changes pushed to remote")) + } + + runPushLog.Print("Push completed successfully") + return nil +} diff --git a/pkg/cli/run_push_test.go b/pkg/cli/run_push_test.go new file mode 100644 index 00000000000..57692a1c5f4 --- /dev/null +++ b/pkg/cli/run_push_test.go @@ -0,0 +1,384 @@ +package cli + +import ( + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestCollectWorkflowFiles_SimpleWorkflow(t *testing.T) { + // Create a temporary directory for testing + tmpDir := t.TempDir() + + // Create a simple workflow file + workflowPath := filepath.Join(tmpDir, "test-workflow.md") + workflowContent := `--- +name: Test Workflow +on: workflow_dispatch +--- +# Test Workflow +This 
is a test workflow. +` + err := os.WriteFile(workflowPath, []byte(workflowContent), 0644) + require.NoError(t, err) + + // Create the corresponding lock file + lockFilePath := filepath.Join(tmpDir, "test-workflow.lock.yml") + lockContent := `name: Test Workflow +on: workflow_dispatch +` + err = os.WriteFile(lockFilePath, []byte(lockContent), 0644) + require.NoError(t, err) + + // Test collecting files + files, err := collectWorkflowFiles(workflowPath, false) + require.NoError(t, err) + assert.Len(t, files, 2, "Should collect workflow .md and .lock.yml files") + + // Check that both files are in the result + fileSet := make(map[string]bool) + for _, file := range files { + fileSet[file] = true + } + assert.True(t, fileSet[workflowPath], "Should include workflow .md file") + assert.True(t, fileSet[lockFilePath], "Should include lock .yml file") +} + +func TestCollectWorkflowFiles_WithImports(t *testing.T) { + // Create a temporary directory for testing + tmpDir := t.TempDir() + + // Create a shared file + sharedPath := filepath.Join(tmpDir, "shared.md") + sharedContent := `# Shared Content +This is shared content. +` + err := os.WriteFile(sharedPath, []byte(sharedContent), 0644) + require.NoError(t, err) + + // Create a workflow file that imports the shared file + workflowPath := filepath.Join(tmpDir, "test-workflow.md") + workflowContent := `--- +name: Test Workflow +on: workflow_dispatch +imports: + - shared.md +--- +# Test Workflow +This workflow imports shared content. 
+` + err = os.WriteFile(workflowPath, []byte(workflowContent), 0644) + require.NoError(t, err) + + // Create the corresponding lock file + lockFilePath := filepath.Join(tmpDir, "test-workflow.lock.yml") + lockContent := `name: Test Workflow +on: workflow_dispatch +` + err = os.WriteFile(lockFilePath, []byte(lockContent), 0644) + require.NoError(t, err) + + // Test collecting files + files, err := collectWorkflowFiles(workflowPath, false) + require.NoError(t, err) + assert.Len(t, files, 3, "Should collect workflow, lock, and imported files") + + // Check that all files are in the result + fileSet := make(map[string]bool) + for _, file := range files { + fileSet[file] = true + } + assert.True(t, fileSet[workflowPath], "Should include workflow .md file") + assert.True(t, fileSet[lockFilePath], "Should include lock .yml file") + assert.True(t, fileSet[sharedPath], "Should include imported shared.md file") +} + +func TestCollectWorkflowFiles_TransitiveImports(t *testing.T) { + // Create a temporary directory for testing + tmpDir := t.TempDir() + + // Create base shared file + baseSharedPath := filepath.Join(tmpDir, "base-shared.md") + baseSharedContent := `# Base Shared Content +This is base shared content. +` + err := os.WriteFile(baseSharedPath, []byte(baseSharedContent), 0644) + require.NoError(t, err) + + // Create intermediate shared file that imports base + intermediateSharedPath := filepath.Join(tmpDir, "intermediate-shared.md") + intermediateSharedContent := `--- +imports: + - base-shared.md +--- +# Intermediate Shared Content +This imports base shared. 
+` + err = os.WriteFile(intermediateSharedPath, []byte(intermediateSharedContent), 0644) + require.NoError(t, err) + + // Create a workflow file that imports the intermediate file + workflowPath := filepath.Join(tmpDir, "test-workflow.md") + workflowContent := `--- +name: Test Workflow +on: workflow_dispatch +imports: + - intermediate-shared.md +--- +# Test Workflow +This workflow imports intermediate shared content. +` + err = os.WriteFile(workflowPath, []byte(workflowContent), 0644) + require.NoError(t, err) + + // Create the corresponding lock file + lockFilePath := filepath.Join(tmpDir, "test-workflow.lock.yml") + lockContent := `name: Test Workflow +on: workflow_dispatch +` + err = os.WriteFile(lockFilePath, []byte(lockContent), 0644) + require.NoError(t, err) + + // Test collecting files + files, err := collectWorkflowFiles(workflowPath, false) + require.NoError(t, err) + assert.Len(t, files, 4, "Should collect workflow, lock, and all transitive imports") + + // Check that all files are in the result + fileSet := make(map[string]bool) + for _, file := range files { + fileSet[file] = true + } + assert.True(t, fileSet[workflowPath], "Should include workflow .md file") + assert.True(t, fileSet[lockFilePath], "Should include lock .yml file") + assert.True(t, fileSet[intermediateSharedPath], "Should include intermediate-shared.md file") + assert.True(t, fileSet[baseSharedPath], "Should include base-shared.md file") +} + +func TestCollectWorkflowFiles_NoLockFile(t *testing.T) { + // Create a temporary directory for testing + tmpDir := t.TempDir() + + // Create a simple workflow file without a lock file + workflowPath := filepath.Join(tmpDir, "test-workflow.md") + workflowContent := `--- +name: Test Workflow +on: workflow_dispatch +--- +# Test Workflow +This is a test workflow without a lock file. 
+` + err := os.WriteFile(workflowPath, []byte(workflowContent), 0644) + require.NoError(t, err) + + // Test collecting files - should now compile the workflow and create lock file + files, err := collectWorkflowFiles(workflowPath, false) + require.NoError(t, err) + assert.Len(t, files, 2, "Should collect workflow .md file and auto-generate lock file") + + // Check that both workflow file and lock file are in the result + fileSet := make(map[string]bool) + for _, file := range files { + fileSet[file] = true + } + assert.True(t, fileSet[workflowPath], "Should include workflow .md file") + + lockFilePath := strings.TrimSuffix(workflowPath, ".md") + ".lock.yml" + assert.True(t, fileSet[lockFilePath], "Should include auto-generated lock .yml file") +} + +func TestIsWorkflowSpecFormatLocal(t *testing.T) { + tests := []struct { + name string + path string + expected bool + }{ + { + name: "workflowspec with SHA", + path: "owner/repo/path/file.md@abc123", + expected: true, + }, + { + name: "workflowspec without SHA", + path: "owner/repo/path/file.md", + expected: true, + }, + { + name: "relative path with ./", + path: "./shared/file.md", + expected: false, + }, + { + name: "relative path without ./", + path: "shared/file.md", + expected: false, + }, + { + name: "absolute path", + path: "/shared/file.md", + expected: false, + }, + { + name: "workflowspec with section", + path: "owner/repo/path/file.md#section", + expected: true, + }, + { + name: "simple filename", + path: "file.md", + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isWorkflowSpecFormatLocal(tt.path) + assert.Equal(t, tt.expected, result, "isWorkflowSpecFormatLocal(%q) = %v, want %v", tt.path, result, tt.expected) + }) + } +} + +func TestResolveImportPathLocal(t *testing.T) { + // Create a temporary directory for testing + tmpDir := t.TempDir() + baseDir := filepath.Join(tmpDir, "workflows") + err := os.MkdirAll(baseDir, 0755) + require.NoError(t, 
err) + + tests := []struct { + name string + importPath string + baseDir string + expected string + }{ + { + name: "relative path", + importPath: "shared/file.md", + baseDir: baseDir, + expected: filepath.Join(baseDir, "shared/file.md"), + }, + { + name: "path with section", + importPath: "shared/file.md#section", + baseDir: baseDir, + expected: filepath.Join(baseDir, "shared/file.md"), + }, + { + name: "workflowspec format with @", + importPath: "owner/repo/path/file.md@abc123", + baseDir: baseDir, + expected: "", + }, + { + name: "workflowspec format without @", + importPath: "owner/repo/path/file.md", + baseDir: baseDir, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := resolveImportPathLocal(tt.importPath, tt.baseDir) + assert.Equal(t, tt.expected, result, "resolveImportPathLocal(%q, %q) = %v, want %v", tt.importPath, tt.baseDir, result, tt.expected) + }) + } +} + +func TestCollectWorkflowFiles_WithOutdatedLockFile(t *testing.T) { + // Create a temporary directory for testing + tmpDir := t.TempDir() + + // Create a workflow file + workflowPath := filepath.Join(tmpDir, "test-workflow.md") + workflowContent := `--- +name: Test Workflow +on: workflow_dispatch +--- +# Test Workflow +This is a test workflow. 
+` + err := os.WriteFile(workflowPath, []byte(workflowContent), 0644) + require.NoError(t, err) + + // Create an old lock file (simulate outdated) + lockFilePath := filepath.Join(tmpDir, "test-workflow.lock.yml") + lockContent := `name: Test Workflow +on: workflow_dispatch +` + err = os.WriteFile(lockFilePath, []byte(lockContent), 0644) + require.NoError(t, err) + + // Make the workflow file newer by sleeping and touching it + time.Sleep(100 * time.Millisecond) + currentTime := time.Now() + err = os.Chtimes(workflowPath, currentTime, currentTime) + require.NoError(t, err) + + // Verify the lock file is older + mdStat, err := os.Stat(workflowPath) + require.NoError(t, err) + lockStat, err := os.Stat(lockFilePath) + require.NoError(t, err) + assert.True(t, mdStat.ModTime().After(lockStat.ModTime()), "Workflow file should be newer than lock file") + + // Note: We can't actually test recompilation here without a full compilation setup, + // but we can verify the detection logic works + // The actual compilation would happen in an integration test +} + +func TestPushWorkflowFiles_WithStagedFiles(t *testing.T) { + // Create a temporary directory for testing + tmpDir := t.TempDir() + + // Initialize a git repo + cmd := exec.Command("git", "init") + cmd.Dir = tmpDir + err := cmd.Run() + require.NoError(t, err) + + // Configure git + cmd = exec.Command("git", "config", "user.email", "test@example.com") + cmd.Dir = tmpDir + err = cmd.Run() + require.NoError(t, err) + + cmd = exec.Command("git", "config", "user.name", "Test User") + cmd.Dir = tmpDir + err = cmd.Run() + require.NoError(t, err) + + // Create a test file and stage it + testFile := filepath.Join(tmpDir, "test-file.txt") + err = os.WriteFile(testFile, []byte("test content"), 0644) + require.NoError(t, err) + + cmd = exec.Command("git", "add", "test-file.txt") + cmd.Dir = tmpDir + err = cmd.Run() + require.NoError(t, err) + + // Save current directory and change to tmpDir + originalDir, err := os.Getwd() + 
require.NoError(t, err) + err = os.Chdir(tmpDir) + require.NoError(t, err) + defer os.Chdir(originalDir) + + // Try to push workflow files - should fail due to staged files + workflowFile := filepath.Join(tmpDir, "workflow.md") + err = os.WriteFile(workflowFile, []byte("# Test"), 0644) + require.NoError(t, err) + + err = pushWorkflowFiles("test-workflow", []string{workflowFile}, "", false) + + // Should return an error about staged files + require.Error(t, err) + assert.Contains(t, err.Error(), "staged files") +} diff --git a/pkg/console/accessibility.go b/pkg/console/accessibility.go new file mode 100644 index 00000000000..d33cb83eac6 --- /dev/null +++ b/pkg/console/accessibility.go @@ -0,0 +1,19 @@ +package console + +import "os" + +// IsAccessibleMode detects if accessibility mode should be enabled based on environment variables. +// Accessibility mode is enabled when: +// - ACCESSIBLE environment variable is set to any value +// - TERM environment variable is set to "dumb" +// - NO_COLOR environment variable is set to any value +// +// This function should be used by UI components to determine whether to: +// - Disable animations and spinners +// - Simplify interactive elements +// - Use plain text instead of fancy formatting +func IsAccessibleMode() bool { + return os.Getenv("ACCESSIBLE") != "" || + os.Getenv("TERM") == "dumb" || + os.Getenv("NO_COLOR") != "" +} diff --git a/pkg/console/accessibility_test.go b/pkg/console/accessibility_test.go new file mode 100644 index 00000000000..914e7154bba --- /dev/null +++ b/pkg/console/accessibility_test.go @@ -0,0 +1,83 @@ +package console + +import ( + "os" + "testing" +) + +func TestIsAccessibleMode(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + expected bool + }{ + { + name: "ACCESSIBLE set", + envVars: map[string]string{"ACCESSIBLE": "1"}, + expected: true, + }, + { + name: "TERM=dumb", + envVars: map[string]string{"TERM": "dumb"}, + expected: true, + }, + { + name: "NO_COLOR 
set", + envVars: map[string]string{"NO_COLOR": "1"}, + expected: true, + }, + { + name: "no accessibility indicators", + envVars: map[string]string{}, + expected: false, + }, + { + name: "TERM not dumb", + envVars: map[string]string{"TERM": "xterm-256color"}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Save original environment + origAccessible := os.Getenv("ACCESSIBLE") + origTerm := os.Getenv("TERM") + origNoColor := os.Getenv("NO_COLOR") + + // Clean up after test + defer func() { + if origAccessible != "" { + os.Setenv("ACCESSIBLE", origAccessible) + } else { + os.Unsetenv("ACCESSIBLE") + } + if origTerm != "" { + os.Setenv("TERM", origTerm) + } else { + os.Unsetenv("TERM") + } + if origNoColor != "" { + os.Setenv("NO_COLOR", origNoColor) + } else { + os.Unsetenv("NO_COLOR") + } + }() + + // Clear all relevant env vars first + for _, key := range []string{"ACCESSIBLE", "TERM", "NO_COLOR"} { + os.Unsetenv(key) + } + + // Set test env vars + for key, value := range tt.envVars { + os.Setenv(key, value) + } + + result := IsAccessibleMode() + if result != tt.expected { + t.Errorf("IsAccessibleMode() = %v, want %v", result, tt.expected) + } + }) + } +} diff --git a/pkg/console/confirm.go b/pkg/console/confirm.go new file mode 100644 index 00000000000..26f8c3e9539 --- /dev/null +++ b/pkg/console/confirm.go @@ -0,0 +1,27 @@ +package console + +import ( + "github.com/charmbracelet/huh" +) + +// ConfirmAction shows an interactive confirmation dialog using Bubble Tea (huh) +// Returns true if the user confirms, false if they cancel or an error occurs +func ConfirmAction(title, affirmative, negative string) (bool, error) { + var confirmed bool + + confirmForm := huh.NewForm( + huh.NewGroup( + huh.NewConfirm(). + Title(title). + Affirmative(affirmative). + Negative(negative). 
+ Value(&confirmed), + ), + ).WithAccessible(IsAccessibleMode()) + + if err := confirmForm.Run(); err != nil { + return false, err + } + + return confirmed, nil +} diff --git a/pkg/console/confirm_test.go b/pkg/console/confirm_test.go new file mode 100644 index 00000000000..86d8086ccdc --- /dev/null +++ b/pkg/console/confirm_test.go @@ -0,0 +1,16 @@ +package console + +import ( + "testing" +) + +func TestConfirmAction(t *testing.T) { + // Note: This test can't fully test the interactive behavior without mocking + // the terminal input, but we can verify the function signature and basic setup + + t.Run("function signature", func(t *testing.T) { + // This test just verifies the function exists and has the right signature + // Actual interactive testing would require a mock terminal + _ = ConfirmAction + }) +} diff --git a/pkg/workflow/action_pins_logging_test.go b/pkg/workflow/action_pins_logging_test.go index 9e9f3549c67..f395799c622 100644 --- a/pkg/workflow/action_pins_logging_test.go +++ b/pkg/workflow/action_pins_logging_test.go @@ -174,106 +174,106 @@ func TestActionPinResolutionWithStrictMode(t *testing.T) { // TestActionCacheDuplicateSHAWarning verifies that we log warnings when multiple // version references resolve to the same SHA, which can cause version comment flipping func TestActionCacheDuplicateSHAWarning(t *testing.T) { -// Create a test cache with one entry -cache := &ActionCache{ -Entries: map[string]ActionCacheEntry{ -"actions/github-script@v8": { -Repo: "actions/github-script", -Version: "v8", -SHA: "ed597411d8f924073f98dfc5c65a23a2325f34cd", -}, -}, -path: "/tmp/test-cache.json", -} + // Create a test cache with one entry + cache := &ActionCache{ + Entries: map[string]ActionCacheEntry{ + "actions/github-script@v8": { + Repo: "actions/github-script", + Version: "v8", + SHA: "ed597411d8f924073f98dfc5c65a23a2325f34cd", + }, + }, + path: "/tmp/test-cache.json", + } -// Add a second entry with the same SHA but different version 
-cache.Set("actions/github-script", "v8.0.0", "ed597411d8f924073f98dfc5c65a23a2325f34cd") + // Add a second entry with the same SHA but different version + cache.Set("actions/github-script", "v8.0.0", "ed597411d8f924073f98dfc5c65a23a2325f34cd") -// Verify both entries are in the cache -if len(cache.Entries) != 2 { -t.Errorf("Expected 2 cache entries, got %d", len(cache.Entries)) -} + // Verify both entries are in the cache + if len(cache.Entries) != 2 { + t.Errorf("Expected 2 cache entries, got %d", len(cache.Entries)) + } -// Verify both have the same SHA (this is what causes the issue) -v8Entry := cache.Entries["actions/github-script@v8"] -v800Entry := cache.Entries["actions/github-script@v8.0.0"] -if v8Entry.SHA != v800Entry.SHA { -t.Error("Expected both entries to have the same SHA") -} + // Verify both have the same SHA (this is what causes the issue) + v8Entry := cache.Entries["actions/github-script@v8"] + v800Entry := cache.Entries["actions/github-script@v8.0.0"] + if v8Entry.SHA != v800Entry.SHA { + t.Error("Expected both entries to have the same SHA") + } -t.Logf("Cache has duplicate SHA entries with different versions:") -t.Logf(" v8: %s", v8Entry.SHA[:8]) -t.Logf(" v8.0.0: %s", v800Entry.SHA[:8]) -t.Logf("This configuration causes version comment flipping in lock files") + t.Logf("Cache has duplicate SHA entries with different versions:") + t.Logf(" v8: %s", v8Entry.SHA[:8]) + t.Logf(" v8.0.0: %s", v800Entry.SHA[:8]) + t.Logf("This configuration causes version comment flipping in lock files") } // TestDeduplicationRemovesLessPreciseVersions verifies that deduplication // keeps the most precise version and logs detailed information func TestDeduplicationRemovesLessPreciseVersions(t *testing.T) { -tests := []struct { -name string -entries map[string]ActionCacheEntry -expectedKeep string -expectedRemoveCount int -}{ -{ -name: "v8.0.0 is kept over v8", -entries: map[string]ActionCacheEntry{ -"actions/github-script@v8": { -Repo: "actions/github-script", 
-Version: "v8", -SHA: "ed597411d8f924073f98dfc5c65a23a2325f34cd", -}, -"actions/github-script@v8.0.0": { -Repo: "actions/github-script", -Version: "v8.0.0", -SHA: "ed597411d8f924073f98dfc5c65a23a2325f34cd", -}, -}, -expectedKeep: "actions/github-script@v8.0.0", -expectedRemoveCount: 1, -}, -{ -name: "v6.1.0 is kept over v6", -entries: map[string]ActionCacheEntry{ -"actions/setup-node@v6": { -Repo: "actions/setup-node", -Version: "v6", -SHA: "395ad3262231945c25e8478fd5baf05154b1d79f", -}, -"actions/setup-node@v6.1.0": { -Repo: "actions/setup-node", -Version: "v6.1.0", -SHA: "395ad3262231945c25e8478fd5baf05154b1d79f", -}, -}, -expectedKeep: "actions/setup-node@v6.1.0", -expectedRemoveCount: 1, -}, -} + tests := []struct { + name string + entries map[string]ActionCacheEntry + expectedKeep string + expectedRemoveCount int + }{ + { + name: "v8.0.0 is kept over v8", + entries: map[string]ActionCacheEntry{ + "actions/github-script@v8": { + Repo: "actions/github-script", + Version: "v8", + SHA: "ed597411d8f924073f98dfc5c65a23a2325f34cd", + }, + "actions/github-script@v8.0.0": { + Repo: "actions/github-script", + Version: "v8.0.0", + SHA: "ed597411d8f924073f98dfc5c65a23a2325f34cd", + }, + }, + expectedKeep: "actions/github-script@v8.0.0", + expectedRemoveCount: 1, + }, + { + name: "v6.1.0 is kept over v6", + entries: map[string]ActionCacheEntry{ + "actions/setup-node@v6": { + Repo: "actions/setup-node", + Version: "v6", + SHA: "395ad3262231945c25e8478fd5baf05154b1d79f", + }, + "actions/setup-node@v6.1.0": { + Repo: "actions/setup-node", + Version: "v6.1.0", + SHA: "395ad3262231945c25e8478fd5baf05154b1d79f", + }, + }, + expectedKeep: "actions/setup-node@v6.1.0", + expectedRemoveCount: 1, + }, + } -for _, tt := range tests { -t.Run(tt.name, func(t *testing.T) { -cache := &ActionCache{ -Entries: tt.entries, -path: "/tmp/test-cache.json", -} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cache := &ActionCache{ + Entries: tt.entries, + path: 
"/tmp/test-cache.json", + } -initialCount := len(cache.Entries) -cache.deduplicateEntries() + initialCount := len(cache.Entries) + cache.deduplicateEntries() -if _, exists := cache.Entries[tt.expectedKeep]; !exists { -t.Errorf("Expected entry %s to be kept, but it was removed", tt.expectedKeep) -} + if _, exists := cache.Entries[tt.expectedKeep]; !exists { + t.Errorf("Expected entry %s to be kept, but it was removed", tt.expectedKeep) + } -removed := initialCount - len(cache.Entries) -if removed != tt.expectedRemoveCount { -t.Errorf("Expected %d entries to be removed, but %d were removed", -tt.expectedRemoveCount, removed) -} + removed := initialCount - len(cache.Entries) + if removed != tt.expectedRemoveCount { + t.Errorf("Expected %d entries to be removed, but %d were removed", + tt.expectedRemoveCount, removed) + } -t.Logf("Deduplication kept %s, removed %d less precise entries", -tt.expectedKeep, removed) -}) -} + t.Logf("Deduplication kept %s, removed %d less precise entries", + tt.expectedKeep, removed) + }) + } } diff --git a/pkg/workflow/compiler_safe_outputs_config.go b/pkg/workflow/compiler_safe_outputs_config.go index d1167e9e819..d4aafb77c4d 100644 --- a/pkg/workflow/compiler_safe_outputs_config.go +++ b/pkg/workflow/compiler_safe_outputs_config.go @@ -1,14 +1,10 @@ package workflow import ( -"encoding/json" -"fmt" - -"github.com/githubnext/gh-aw/pkg/logger" + "encoding/json" + "fmt" ) -var consolidatedSafeOutputsConfigLog = logger.New("workflow:compiler_safe_outputs_config") - func (c *Compiler) addHandlerManagerConfigEnvVar(steps *[]string, data *WorkflowData) { if data.SafeOutputs == nil { return diff --git a/pkg/workflow/compiler_safe_outputs_config_test.go b/pkg/workflow/compiler_safe_outputs_config_test.go index 68229838fd4..f312df285a4 100644 --- a/pkg/workflow/compiler_safe_outputs_config_test.go +++ b/pkg/workflow/compiler_safe_outputs_config_test.go @@ -278,7 +278,6 @@ func TestHandlerConfigMaxValues(t *testing.T) { var steps []string 
compiler.addHandlerManagerConfigEnvVar(&steps, workflowData) - // Extract and validate JSON for _, step := range steps { if strings.Contains(step, "GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG") { @@ -297,7 +296,7 @@ func TestHandlerConfigMaxValues(t *testing.T) { maxVal, ok := issueConfig["max"] require.True(t, ok) - assert.Equal(t, float64(10), maxVal) + assert.InDelta(t, float64(10), maxVal, 0.0001) } } } @@ -319,7 +318,6 @@ func TestHandlerConfigAllowedLabels(t *testing.T) { var steps []string compiler.addHandlerManagerConfigEnvVar(&steps, workflowData) - // Extract and validate JSON for _, step := range steps { if strings.Contains(step, "GH_AW_SAFE_OUTPUTS_HANDLER_CONFIG") { @@ -438,8 +436,8 @@ func TestHandlerConfigBooleanFields(t *testing.T) { // TestHandlerConfigUpdateFields tests update field configurations func TestHandlerConfigUpdateFields(t *testing.T) { tests := []struct { - name string - config *UpdateIssuesConfig + name string + config *UpdateIssuesConfig expectedKeys []string }{ { @@ -568,9 +566,9 @@ func TestHandlerConfigTargetRepo(t *testing.T) { // TestHandlerConfigPatchSize tests max patch size configuration func TestHandlerConfigPatchSize(t *testing.T) { tests := []struct { - name string - maxPatchSize int - expectedSize int + name string + maxPatchSize int + expectedSize int }{ { name: "default patch size", @@ -619,7 +617,7 @@ func TestHandlerConfigPatchSize(t *testing.T) { maxSize, ok := prConfig["max_patch_size"] require.True(t, ok) - assert.Equal(t, float64(tt.expectedSize), maxSize) + assert.InDelta(t, float64(tt.expectedSize), maxSize, 0.0001) } } } diff --git a/pkg/workflow/compiler_safe_outputs_core.go b/pkg/workflow/compiler_safe_outputs_core.go index 9b9686e6639..4181a7ce374 100644 --- a/pkg/workflow/compiler_safe_outputs_core.go +++ b/pkg/workflow/compiler_safe_outputs_core.go @@ -1,7 +1,7 @@ package workflow import ( -"github.com/githubnext/gh-aw/pkg/logger" + "github.com/githubnext/gh-aw/pkg/logger" ) var consolidatedSafeOutputsLog = 
logger.New("workflow:compiler_safe_outputs_consolidated") @@ -9,18 +9,18 @@ var consolidatedSafeOutputsLog = logger.New("workflow:compiler_safe_outputs_cons // SafeOutputStepConfig holds configuration for building a single safe output step // within the consolidated safe-outputs job type SafeOutputStepConfig struct { -StepName string // Human-readable step name (e.g., "Create Issue") -StepID string // Step ID for referencing outputs (e.g., "create_issue") -Script string // JavaScript script to execute (for inline mode) -ScriptName string // Name of the script in the registry (for file mode) -CustomEnvVars []string // Environment variables specific to this step -Condition ConditionNode // Step-level condition (if clause) -Token string // GitHub token for this step -UseCopilotToken bool // Whether to use Copilot token preference chain -UseAgentToken bool // Whether to use agent token preference chain -PreSteps []string // Optional steps to run before the script step -PostSteps []string // Optional steps to run after the script step -Outputs map[string]string // Outputs from this step + StepName string // Human-readable step name (e.g., "Create Issue") + StepID string // Step ID for referencing outputs (e.g., "create_issue") + Script string // JavaScript script to execute (for inline mode) + ScriptName string // Name of the script in the registry (for file mode) + CustomEnvVars []string // Environment variables specific to this step + Condition ConditionNode // Step-level condition (if clause) + Token string // GitHub token for this step + UseCopilotToken bool // Whether to use Copilot token preference chain + UseAgentToken bool // Whether to use agent token preference chain + PreSteps []string // Optional steps to run before the script step + PostSteps []string // Optional steps to run after the script step + Outputs map[string]string // Outputs from this step } // Note: The implementation functions have been moved to focused module files: diff --git 
a/pkg/workflow/compiler_safe_outputs_env.go b/pkg/workflow/compiler_safe_outputs_env.go index 99dcc41fe69..d26d5695c58 100644 --- a/pkg/workflow/compiler_safe_outputs_env.go +++ b/pkg/workflow/compiler_safe_outputs_env.go @@ -1,11 +1,5 @@ package workflow -import ( -"github.com/githubnext/gh-aw/pkg/logger" -) - -var consolidatedSafeOutputsEnvLog = logger.New("workflow:compiler_safe_outputs_env") - func (c *Compiler) addAllSafeOutputConfigEnvVars(steps *[]string, data *WorkflowData) { if data.SafeOutputs == nil { return diff --git a/pkg/workflow/compiler_safe_outputs_env_test.go b/pkg/workflow/compiler_safe_outputs_env_test.go index f91f5c021f1..9e4256125b4 100644 --- a/pkg/workflow/compiler_safe_outputs_env_test.go +++ b/pkg/workflow/compiler_safe_outputs_env_test.go @@ -11,10 +11,10 @@ import ( // TestAddAllSafeOutputConfigEnvVars tests environment variable generation for all safe output types func TestAddAllSafeOutputConfigEnvVars(t *testing.T) { tests := []struct { - name string - safeOutputs *SafeOutputsConfig - trialMode bool - checkContains []string + name string + safeOutputs *SafeOutputsConfig + trialMode bool + checkContains []string checkNotContains []string }{ { @@ -70,7 +70,7 @@ func TestAddAllSafeOutputConfigEnvVars(t *testing.T) { { name: "update issues with staged flag", safeOutputs: &SafeOutputsConfig{ - Staged: true, + Staged: true, UpdateIssues: &UpdateIssuesConfig{}, }, checkContains: []string{ @@ -80,7 +80,7 @@ func TestAddAllSafeOutputConfigEnvVars(t *testing.T) { { name: "update discussions with staged flag", safeOutputs: &SafeOutputsConfig{ - Staged: true, + Staged: true, UpdateDiscussions: &UpdateDiscussionsConfig{}, }, checkContains: []string{ @@ -306,7 +306,7 @@ func TestEnvVarsWithMultipleSafeOutputTypes(t *testing.T) { AddLabels: &AddLabelsConfig{ Allowed: []string{"bug", "enhancement"}, }, - UpdateIssues: &UpdateIssuesConfig{}, + UpdateIssues: &UpdateIssuesConfig{}, UpdateDiscussions: &UpdateDiscussionsConfig{}, }, } diff --git 
a/pkg/workflow/compiler_safe_outputs_job_test.go b/pkg/workflow/compiler_safe_outputs_job_test.go index ebbef988627..e81cf2ddb25 100644 --- a/pkg/workflow/compiler_safe_outputs_job_test.go +++ b/pkg/workflow/compiler_safe_outputs_job_test.go @@ -12,14 +12,14 @@ import ( // TestBuildConsolidatedSafeOutputsJob tests the main job builder function func TestBuildConsolidatedSafeOutputsJob(t *testing.T) { tests := []struct { - name string - safeOutputs *SafeOutputsConfig - threatDetection bool - expectedJobName string - expectedSteps int - expectNil bool - checkPermissions bool - expectedPerms []string + name string + safeOutputs *SafeOutputsConfig + threatDetection bool + expectedJobName string + expectedSteps int + expectNil bool + checkPermissions bool + expectedPerms []string }{ { name: "no safe outputs configured", @@ -121,7 +121,7 @@ func TestBuildConsolidatedSafeOutputsJob(t *testing.T) { SafeOutputs: tt.safeOutputs, } - job, stepNames, err := compiler.buildConsolidatedSafeOutputsJob(workflowData, string(string(constants.AgentJobName)), "test-workflow.md") + job, stepNames, err := compiler.buildConsolidatedSafeOutputsJob(workflowData, string(constants.AgentJobName), "test-workflow.md") if tt.expectNil { assert.Nil(t, job) @@ -137,7 +137,7 @@ func TestBuildConsolidatedSafeOutputsJob(t *testing.T) { assert.NotEmpty(t, job.Env) // Check job dependencies - assert.Contains(t, job.Needs, string(string(constants.AgentJobName))) + assert.Contains(t, job.Needs, string(constants.AgentJobName)) if tt.threatDetection { assert.Contains(t, job.Needs, string(constants.DetectionJobName)) } @@ -162,12 +162,12 @@ func TestBuildConsolidatedSafeOutputsJob(t *testing.T) { // TestBuildJobLevelSafeOutputEnvVars tests job-level environment variable generation func TestBuildJobLevelSafeOutputEnvVars(t *testing.T) { tests := []struct { - name string - workflowData *WorkflowData - workflowID string - trialMode bool - trialRepo string - expectedVars map[string]string + name string + 
workflowData *WorkflowData + workflowID string + trialMode bool + trialRepo string + expectedVars map[string]string checkContains bool }{ { @@ -343,7 +343,7 @@ func TestJobWithGitHubApp(t *testing.T) { Name: "Test Workflow", SafeOutputs: &SafeOutputsConfig{ App: &GitHubAppConfig{ - AppID: "12345", + AppID: "12345", PrivateKey: "test-key", }, CreateIssues: &CreateIssuesConfig{ diff --git a/pkg/workflow/compiler_safe_outputs_steps_test.go b/pkg/workflow/compiler_safe_outputs_steps_test.go index 0ed5e3bf72e..7c7420a3f70 100644 --- a/pkg/workflow/compiler_safe_outputs_steps_test.go +++ b/pkg/workflow/compiler_safe_outputs_steps_test.go @@ -11,18 +11,18 @@ import ( // TestBuildConsolidatedSafeOutputStep tests individual step building func TestBuildConsolidatedSafeOutputStep(t *testing.T) { tests := []struct { - name string - config SafeOutputStepConfig - checkContains []string + name string + config SafeOutputStepConfig + checkContains []string checkNotContains []string }{ { name: "basic step with inline script", config: SafeOutputStepConfig{ - StepName: "Test Step", - StepID: "test_step", - Script: "console.log('test');", - Token: "${{ github.token }}", + StepName: "Test Step", + StepID: "test_step", + Script: "console.log('test');", + Token: "${{ github.token }}", }, checkContains: []string{ "name: Test Step", @@ -55,11 +55,11 @@ func TestBuildConsolidatedSafeOutputStep(t *testing.T) { { name: "step with condition", config: SafeOutputStepConfig{ - StepName: "Conditional Step", - StepID: "conditional", - Script: "console.log('test');", - Token: "${{ github.token }}", - Condition: BuildEquals(BuildStringLiteral("test"), BuildStringLiteral("test")), + StepName: "Conditional Step", + StepID: "conditional", + Script: "console.log('test');", + Token: "${{ github.token }}", + Condition: BuildEquals(BuildStringLiteral("test"), BuildStringLiteral("test")), }, checkContains: []string{ "if: 'test' == 'test'", @@ -186,7 +186,7 @@ func TestBuildSharedPRCheckoutSteps(t *testing.T) 
{ name: "with GitHub App token", safeOutputs: &SafeOutputsConfig{ App: &GitHubAppConfig{ - AppID: "12345", + AppID: "12345", PrivateKey: "test-key", }, CreatePullRequests: &CreatePullRequestsConfig{},