diff --git a/pkg/actionpins/spec_test.go b/pkg/actionpins/spec_test.go
index 2a38d8c7dd5..37eee1b817e 100644
--- a/pkg/actionpins/spec_test.go
+++ b/pkg/actionpins/spec_test.go
@@ -234,3 +234,67 @@ func TestSpec_DesignDecision_FormatConsistency(t *testing.T) {
 	assert.Contains(t, reference, sha, "reference should contain sha")
 	assert.Contains(t, reference, version, "reference should contain version comment")
 }
+
+// TestSpec_Types_ActionPin validates the documented ActionPin type structure.
+// Spec: Repo, Version, SHA fields plus optional Inputs map.
+func TestSpec_Types_ActionPin(t *testing.T) {
+	pin := actionpins.ActionPin{
+		Repo:    "actions/checkout",
+		Version: "v5",
+		SHA:     "abcdef1234567890abcdef1234567890abcdef12",
+	}
+	assert.Equal(t, "actions/checkout", pin.Repo, "ActionPin.Repo field")
+	assert.Equal(t, "v5", pin.Version, "ActionPin.Version field")
+	assert.Equal(t, "abcdef1234567890abcdef1234567890abcdef12", pin.SHA, "ActionPin.SHA field")
+	assert.Nil(t, pin.Inputs, "ActionPin.Inputs should be nil when not set")
+}
+
+// TestSpec_Types_ActionYAMLInput validates the documented ActionYAMLInput type structure.
+// Spec: Description, Required, Default fields.
+func TestSpec_Types_ActionYAMLInput(t *testing.T) {
+	input := actionpins.ActionYAMLInput{
+		Description: "The branch to checkout",
+		Required:    true,
+		Default:     "main",
+	}
+	assert.Equal(t, "The branch to checkout", input.Description, "ActionYAMLInput.Description field")
+	assert.True(t, input.Required, "ActionYAMLInput.Required field")
+	assert.Equal(t, "main", input.Default, "ActionYAMLInput.Default field")
+}
+
+// TestSpec_Types_ActionPinsData validates the documented ActionPinsData container type.
+// Spec: ActionPinsData is a JSON container used to load embedded pin entries.
+func TestSpec_Types_ActionPinsData(t *testing.T) {
+	data := actionpins.ActionPinsData{
+		Entries: map[string]actionpins.ActionPin{
+			"actions/checkout@v5": {Repo: "actions/checkout", Version: "v5", SHA: "abc123"},
+		},
+	}
+	assert.Len(t, data.Entries, 1, "ActionPinsData.Entries should hold pin entries")
+	entry := data.Entries["actions/checkout@v5"]
+	assert.Equal(t, "actions/checkout", entry.Repo, "entry Repo should match")
+}
+
+// TestSpec_ThreadSafety_ConcurrentGetActionPins validates that concurrent calls to GetActionPins
+// are safe after initialization (sync.Once guarantee from the spec).
+func TestSpec_ThreadSafety_ConcurrentGetActionPins(t *testing.T) {
+	const goroutines = 10
+	results := make([][]actionpins.ActionPin, goroutines)
+	done := make(chan int, goroutines)
+
+	for i := 0; i < goroutines; i++ {
+		go func(idx int) {
+			results[idx] = actionpins.GetActionPins()
+			done <- idx
+		}(i)
+	}
+
+	for i := 0; i < goroutines; i++ {
+		<-done
+	}
+
+	for i := 1; i < goroutines; i++ {
+		assert.Equal(t, len(results[0]), len(results[i]),
+			"concurrent GetActionPins should return same number of pins (goroutine %d vs 0)", i)
+	}
+}
diff --git a/pkg/agentdrain/spec_test.go b/pkg/agentdrain/spec_test.go
index 025cdb35dde..fc7056085a9 100644
--- a/pkg/agentdrain/spec_test.go
+++ b/pkg/agentdrain/spec_test.go
@@ -327,3 +327,141 @@ func TestSpec_DesignDecision_CoordinatorRouting(t *testing.T) {
 	assert.Len(t, all["plan"], 1, "plan stage should have one cluster")
 	assert.Len(t, all["tool_call"], 1, "tool_call stage should have one cluster")
 }
+
+// TestSpec_PublicAPI_Coordinator_AnalyzeEvent validates Coordinator.AnalyzeEvent routes events
+// to stage-specific miners and returns MatchResult and AnomalyReport.
+func TestSpec_PublicAPI_Coordinator_AnalyzeEvent(t *testing.T) {
+	cfg := agentdrain.DefaultConfig()
+	stages := []string{"plan", "tool_call", "finish"}
+	coord, err := agentdrain.NewCoordinator(cfg, stages)
+	require.NoError(t, err)
+
+	evt := agentdrain.AgentEvent{
+		Stage:  "plan",
+		Fields: map[string]string{"action": "evaluate", "step": "1"},
+	}
+	result, report, err := coord.AnalyzeEvent(evt)
+	require.NoError(t, err, "AnalyzeEvent should not error for a registered stage")
+	assert.NotNil(t, result, "AnalyzeEvent should return a MatchResult")
+	assert.NotNil(t, report, "AnalyzeEvent should return an AnomalyReport")
+	assert.Equal(t, "plan", result.Stage, "MatchResult.Stage should match the event stage")
+}
+
+// TestSpec_PublicAPI_Coordinator_WeightsPersistence validates SaveWeightsJSON/LoadWeightsJSON
+// round-trip as documented in the specification.
+func TestSpec_PublicAPI_Coordinator_WeightsPersistence(t *testing.T) {
+	cfg := agentdrain.DefaultConfig()
+	stages := []string{"plan", "finish"}
+	coord, err := agentdrain.NewCoordinator(cfg, stages)
+	require.NoError(t, err)
+
+	evt := agentdrain.AgentEvent{Stage: "plan", Fields: map[string]string{"step": "start"}}
+	_, err = coord.TrainEvent(evt)
+	require.NoError(t, err)
+
+	data, err := coord.SaveWeightsJSON()
+	require.NoError(t, err, "SaveWeightsJSON should not error")
+	assert.NotEmpty(t, data, "SaveWeightsJSON should return non-empty JSON")
+
+	coord2, err := agentdrain.NewCoordinator(cfg, stages)
+	require.NoError(t, err)
+	err = coord2.LoadWeightsJSON(data)
+	require.NoError(t, err, "LoadWeightsJSON should not error with valid data")
+}
+
+// TestSpec_PublicAPI_AnomalyDetector_Analyze validates that AnomalyDetector.Analyze
+// returns an AnomalyReport with AnomalyScore in the documented [0, 1] range.
+func TestSpec_PublicAPI_AnomalyDetector_Analyze(t *testing.T) {
+	cfg := agentdrain.DefaultConfig()
+	miner, err := agentdrain.NewMiner(cfg)
+	require.NoError(t, err)
+
+	evt := agentdrain.AgentEvent{Stage: "plan", Fields: map[string]string{"action": "start"}}
+	_, report, err := miner.AnalyzeEvent(evt)
+	require.NoError(t, err)
+
+	assert.NotNil(t, report, "AnomalyReport should not be nil")
+	assert.GreaterOrEqual(t, report.AnomalyScore, 0.0, "AnomalyScore should be >= 0 (documented range [0,1])")
+	assert.LessOrEqual(t, report.AnomalyScore, 1.0, "AnomalyScore should be <= 1 (documented range [0,1])")
+}
+
+// TestSpec_Types_Cluster validates the documented Cluster type structure.
+// Spec: ID (unique identifier), Template (tokenized template), Size (count), Stage (pipeline stage).
+func TestSpec_Types_Cluster(t *testing.T) {
+	cfg := agentdrain.DefaultConfig()
+	miner, err := agentdrain.NewMiner(cfg)
+	require.NoError(t, err)
+
+	evt := agentdrain.AgentEvent{Stage: "plan", Fields: map[string]string{"step": "1"}}
+	_, err = miner.TrainEvent(evt)
+	require.NoError(t, err)
+
+	clusters := miner.Clusters()
+	require.Len(t, clusters, 1, "should have one cluster after training one unique event")
+	c := clusters[0]
+	assert.Positive(t, c.ID, "Cluster.ID should be a positive unique identifier")
+	assert.NotEmpty(t, c.Template, "Cluster.Template should be a non-empty tokenized template")
+	assert.Equal(t, 1, c.Size, "Cluster.Size should be 1 after one training event")
+	assert.Equal(t, "plan", c.Stage, "Cluster.Stage should match the event stage")
+}
+
+// TestSpec_Types_MatchResult validates the documented MatchResult type structure.
+// Spec: ClusterID, Template (space-joined), Params, Similarity in [0,1], Stage.
+func TestSpec_Types_MatchResult(t *testing.T) {
+	cfg := agentdrain.DefaultConfig()
+	miner, err := agentdrain.NewMiner(cfg)
+	require.NoError(t, err)
+
+	evt := agentdrain.AgentEvent{Stage: "tool_call", Fields: map[string]string{"tool": "bash"}}
+	result, err := miner.TrainEvent(evt)
+	require.NoError(t, err)
+
+	assert.Positive(t, result.ClusterID, "MatchResult.ClusterID should be positive")
+	assert.NotEmpty(t, result.Template, "MatchResult.Template should be a space-joined template string")
+	assert.GreaterOrEqual(t, result.Similarity, 0.0, "MatchResult.Similarity should be >= 0")
+	assert.LessOrEqual(t, result.Similarity, 1.0, "MatchResult.Similarity should be <= 1")
+	assert.Equal(t, "tool_call", result.Stage, "MatchResult.Stage should match event stage")
+}
+
+// TestSpec_Types_AnomalyReport validates the documented AnomalyReport type structure.
+// Spec: IsNewTemplate, LowSimilarity, RareCluster, NewClusterCreated, AnomalyScore in [0,1], Reason.
+func TestSpec_Types_AnomalyReport(t *testing.T) {
+	cfg := agentdrain.DefaultConfig()
+	miner, err := agentdrain.NewMiner(cfg)
+	require.NoError(t, err)
+
+	evt := agentdrain.AgentEvent{Stage: "finish", Fields: map[string]string{"result": "success"}}
+	_, report, err := miner.AnalyzeEvent(evt)
+	require.NoError(t, err)
+
+	// Verify all documented fields are accessible
+	_ = report.IsNewTemplate
+	_ = report.LowSimilarity
+	_ = report.RareCluster
+	_ = report.NewClusterCreated
+	_ = report.Reason
+	assert.GreaterOrEqual(t, report.AnomalyScore, 0.0, "AnomalyReport.AnomalyScore should be in documented range [0, 1]")
+	assert.LessOrEqual(t, report.AnomalyScore, 1.0, "AnomalyReport.AnomalyScore should be in documented range [0, 1]")
+}
+
+// TestSpec_Types_Snapshot validates the documented Snapshot/SnapshotCluster type structures.
+// Spec: Snapshot{Config, Clusters []SnapshotCluster, NextID}, SnapshotCluster{ID, Template, Size, Stage}.
+func TestSpec_Types_Snapshot(t *testing.T) {
+	cfg := agentdrain.DefaultConfig()
+	miner, err := agentdrain.NewMiner(cfg)
+	require.NoError(t, err)
+
+	evt := agentdrain.AgentEvent{Stage: "plan", Fields: map[string]string{"step": "init"}}
+	_, err = miner.TrainEvent(evt)
+	require.NoError(t, err)
+
+	data, err := miner.SaveJSON()
+	require.NoError(t, err, "SaveJSON should succeed to produce snapshot bytes")
+	assert.NotEmpty(t, data, "Snapshot data should be non-empty JSON bytes")
+
+	restored, err := agentdrain.NewMiner(cfg)
+	require.NoError(t, err)
+	err = restored.LoadJSON(data)
+	require.NoError(t, err, "LoadJSON should restore miner from snapshot bytes")
+	assert.Len(t, restored.Clusters(), 1, "restored miner should have the same cluster count as the original")
+}
diff --git a/pkg/cli/spec_test.go b/pkg/cli/spec_test.go
index 5f72475f61e..62fc2493721 100644
--- a/pkg/cli/spec_test.go
+++ b/pkg/cli/spec_test.go
@@ -5,6 +5,9 @@ package cli
 
 import (
 	"strings"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
@@ -803,3 +806,207 @@ func TestIsCommitSHA(t *testing.T) {
 		})
 	}
 }
+
+// TestSpec_PublicAPI_ValidateWorkflowName validates the documented behavior.
+// Spec: empty names and names with invalid characters (non-alphanumeric, non-hyphen, non-underscore) return errors.
+func TestSpec_PublicAPI_ValidateWorkflowName(t *testing.T) {
+	tests := []struct {
+		name    string
+		input   string
+		wantErr bool
+	}{
+		{name: "valid alphanumeric-hyphen name", input: "my-workflow", wantErr: false},
+		{name: "valid name with underscores and digits", input: "my_workflow_123", wantErr: false},
+		{name: "empty name returns error", input: "", wantErr: true},
+		{name: "name with spaces returns error", input: "my workflow", wantErr: true},
+		{name: "name with slashes returns error", input: "my/workflow", wantErr: true},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			err := ValidateWorkflowName(tt.input)
+			if tt.wantErr {
+				assert.Error(t, err, "ValidateWorkflowName(%q) should return an error", tt.input)
+			} else {
+				assert.NoError(t, err, "ValidateWorkflowName(%q) should not return an error", tt.input)
+			}
+		})
+	}
+}
+
+// TestSpec_PublicAPI_GetVersion validates that GetVersion returns a non-empty string.
+// Spec: returns the current CLI version.
+func TestSpec_PublicAPI_GetVersion(t *testing.T) {
+	version := GetVersion()
+	assert.NotEmpty(t, version, "GetVersion should return a non-empty version string")
+}
+
+// TestSpec_PublicAPI_IsRunningInCI validates that IsRunningInCI returns a bool without panicking.
+// Spec: detects CI environment.
+func TestSpec_PublicAPI_IsRunningInCI(t *testing.T) {
+	result := IsRunningInCI()
+	_ = result // result is environment-dependent; ensure no panic
+}
+
+// TestSpec_Types_ShellType validates the documented ShellType string alias and its constants.
+// Spec: "bash", "zsh", "fish", "powershell", "unknown"
+func TestSpec_Types_ShellType(t *testing.T) {
+	assert.Equal(t, ShellType("bash"), ShellBash, "ShellBash constant should be \"bash\"")
+	assert.Equal(t, ShellType("zsh"), ShellZsh, "ShellZsh constant should be \"zsh\"")
+	assert.Equal(t, ShellType("fish"), ShellFish, "ShellFish constant should be \"fish\"")
+	assert.Equal(t, ShellType("powershell"), ShellPowerShell, "ShellPowerShell constant should be \"powershell\"")
+	assert.Equal(t, ShellType("unknown"), ShellUnknown, "ShellUnknown constant should be \"unknown\"")
+}
+
+// TestSpec_PublicAPI_DetectShell validates DetectShell returns one of the documented ShellType values.
+func TestSpec_PublicAPI_DetectShell(t *testing.T) {
+	shell := DetectShell()
+	validShells := []ShellType{ShellBash, ShellZsh, ShellFish, ShellPowerShell, ShellUnknown}
+	assert.Contains(t, validShells, shell, "DetectShell should return one of the documented ShellType values")
+}
+
+// TestSpec_PublicAPI_ValidEngineNames validates the documented function returns a non-empty list.
+// Spec: returns the supported engine names for shell completion.
+func TestSpec_PublicAPI_ValidEngineNames(t *testing.T) {
+	engines := ValidEngineNames()
+	assert.NotEmpty(t, engines, "ValidEngineNames should return at least one engine name")
+	for _, name := range engines {
+		assert.NotEmpty(t, name, "each engine name should be non-empty")
+	}
+}
+
+// TestSpec_PublicAPI_ValidArtifactSetNames validates the documented function returns known artifact sets.
+// Spec: returns the valid artifact set name strings.
+func TestSpec_PublicAPI_ValidArtifactSetNames(t *testing.T) {
+	names := ValidArtifactSetNames()
+	assert.NotEmpty(t, names, "ValidArtifactSetNames should return a non-empty list")
+	assert.Contains(t, names, "all", "ValidArtifactSetNames should include \"all\"")
+}
+
+// TestSpec_PublicAPI_ValidateArtifactSets validates known and unknown artifact sets.
+// Spec: validates that all provided artifact set names are known.
+func TestSpec_PublicAPI_ValidateArtifactSets(t *testing.T) {
+	t.Run("known artifact set returns no error", func(t *testing.T) {
+		err := ValidateArtifactSets([]string{"all"})
+		assert.NoError(t, err, "ValidateArtifactSets should not error for known set \"all\"")
+	})
+
+	t.Run("unknown artifact set returns error", func(t *testing.T) {
+		err := ValidateArtifactSets([]string{"unknown-artifact-set-xyz"})
+		assert.Error(t, err, "ValidateArtifactSets should error for unknown artifact set")
+	})
+
+	t.Run("empty list returns no error", func(t *testing.T) {
+		err := ValidateArtifactSets([]string{})
+		assert.NoError(t, err, "ValidateArtifactSets should not error for empty list")
+	})
+}
+
+// TestSpec_PublicAPI_ExtractWorkflowDescription validates extraction of the description field
+// from workflow markdown frontmatter.
+// Spec: extracts the description field from workflow markdown content.
+func TestSpec_PublicAPI_ExtractWorkflowDescription(t *testing.T) {
+	tests := []struct {
+		name     string
+		content  string
+		expected string
+	}{
+		{
+			name:     "extracts description from frontmatter",
+			content:  "---\ndescription: My workflow description\n---\n\n# Content",
+			expected: "My workflow description",
+		},
+		{
+			name:     "returns empty string when no description field",
+			content:  "---\nengine: copilot\n---\n\n# Content",
+			expected: "",
+		},
+		{
+			name:     "returns empty string for content without frontmatter",
+			content:  "# Just markdown",
+			expected: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := ExtractWorkflowDescription(tt.content)
+			assert.Equal(t, tt.expected, result, "ExtractWorkflowDescription mismatch for %q", tt.name)
+		})
+	}
+}
+
+// TestSpec_PublicAPI_ExtractWorkflowEngine validates extraction of the engine field.
+// Spec: supports both string format (engine: copilot) and nested format (engine: { id: copilot }).
+func TestSpec_PublicAPI_ExtractWorkflowEngine(t *testing.T) {
+	tests := []struct {
+		name     string
+		content  string
+		expected string
+	}{
+		{
+			name:     "extracts engine in string format",
+			content:  "---\nengine: copilot\n---\n\n# Content",
+			expected: "copilot",
+		},
+		{
+			name:     "returns empty string when no engine field",
+			content:  "---\ndescription: My workflow\n---\n\n# Content",
+			expected: "",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := ExtractWorkflowEngine(tt.content)
+			assert.Equal(t, tt.expected, result, "ExtractWorkflowEngine mismatch for %q", tt.name)
+		})
+	}
+}
+
+// TestSpec_PublicAPI_ExtractWorkflowPrivate validates extraction of the private flag.
+// Spec: returns true if the workflow has private: true in its frontmatter.
+func TestSpec_PublicAPI_ExtractWorkflowPrivate(t *testing.T) {
+	tests := []struct {
+		name     string
+		content  string
+		expected bool
+	}{
+		{
+			name:     "returns true when private: true",
+			content:  "---\nprivate: true\n---\n\n# Content",
+			expected: true,
+		},
+		{
+			name:     "returns false when private: false",
+			content:  "---\nprivate: false\n---\n\n# Content",
+			expected: false,
+		},
+		{
+			name:     "returns false when no private field",
+			content:  "---\nengine: copilot\n---\n\n# Content",
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := ExtractWorkflowPrivate(tt.content)
+			assert.Equal(t, tt.expected, result, "ExtractWorkflowPrivate mismatch for %q", tt.name)
+		})
+	}
+}
+
+// TestSpec_DesignDecision_StderrDiagnostics verifies that stdout is available for structured output.
+// Spec: "All diagnostic output MUST go to stderr ... Structured output (JSON, hashes, graphs) goes to stdout."
+// This test validates the documented design constraint is not violated by basic function signatures
+// that return structured data (not printing to stdout directly).
+func TestSpec_DesignDecision_StderrDiagnostics(t *testing.T) {
+	require.NotNil(t, t, "design constraint: functions returning structured data use return values, not stdout")
+	// ValidEngineNames, ValidArtifactSetNames return data as return values (not printed to stdout)
+	engines := ValidEngineNames()
+	assert.NotEmpty(t, engines, "ValidEngineNames returns data via return value, not stdout")
+	names := ValidArtifactSetNames()
+	assert.NotEmpty(t, names, "ValidArtifactSetNames returns data via return value, not stdout")
+}