diff --git a/README.md b/README.md index 1f2fe8c..f78ae80 100644 --- a/README.md +++ b/README.md @@ -84,10 +84,11 @@ hyperfleet-adapter/ │ └── logger/ # Structured logging with context support ├── internal/ │ ├── broker_consumer/ # Message broker consumer implementations -│ ├── config-loader/ # Configuration loading logic -│ ├── criteria/ # Precondition evaluation +│ ├── config_loader/ # Configuration loading logic +│ ├── criteria/ # Precondition and CEL evaluation +│ ├── executor/ # Event execution engine │ ├── hyperfleet_api/ # HyperFleet API client -│ └── k8s-objects/ # Kubernetes object management +│ └── k8s_client/ # Kubernetes client wrapper ├── test/ # Integration tests ├── charts/ # Helm chart for Kubernetes deployment ├── Dockerfile # Multi-stage Docker build diff --git a/configs/adapter-config-template.yaml b/configs/adapter-config-template.yaml index dce0659..11f7cd2 100644 --- a/configs/adapter-config-template.yaml +++ b/configs/adapter-config-template.yaml @@ -387,3 +387,9 @@ spec: method: "POST" url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/status" body: "{{ .clusterStatusPayload }}" + timeout: 30s + retryAttempts: 3 + retryBackoff: "exponential" + headers: + - name: "Content-Type" + value: "application/json" diff --git a/go.mod b/go.mod index 262f116..a7f79a9 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/golang/glog v1.2.5 github.com/google/cel-go v0.26.1 github.com/google/uuid v1.6.0 + github.com/mitchellh/copystructure v1.2.0 github.com/openshift-hyperfleet/hyperfleet-broker v0.0.1 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go v0.40.0 @@ -71,6 +72,7 @@ require ( github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.10 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect 
github.com/moby/go-archive v0.1.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect @@ -83,6 +85,8 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/oklog/ulid v1.3.1 // indirect + github.com/onsi/ginkgo/v2 v2.25.1 // indirect + github.com/onsi/gomega v1.38.2 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.1 // indirect github.com/pelletier/go-toml/v2 v2.2.4 // indirect diff --git a/go.sum b/go.sum index ed45f10..18ee4bd 100644 --- a/go.sum +++ b/go.sum @@ -20,6 +20,8 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/ThreeDotsLabs/watermill v1.5.1 h1:t5xMivyf9tpmU3iozPqyrCZXHvoV1XQDfihas4sV0fY= @@ -143,8 +145,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= -github.com/google/pprof 
v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -185,6 +187,10 @@ github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8S github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= @@ -213,10 +219,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo/v2 v2.22.0 
h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= -github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= -github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= -github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/onsi/ginkgo/v2 v2.25.1 h1:Fwp6crTREKM+oA6Cz4MsO8RhKQzs2/gOIVOUscMAfZY= +github.com/onsi/ginkgo/v2 v2.25.1/go.mod h1:ppTWQ1dh9KM/F1XgpeRqelR+zHVwV81DGRSDnFxK7Sk= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -318,6 +324,8 @@ go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKr go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= diff --git a/internal/config_loader/accessors.go b/internal/config_loader/accessors.go index 737a567..99e8074 100644 --- a/internal/config_loader/accessors.go +++ b/internal/config_loader/accessors.go @@ -32,7 +32,7 @@ func BuiltinVariables() []string { // - Built-in variables (metadata, now, 
date) // - Parameters from spec.params // - Captured variables from preconditions -// - Post params +// - Post payloads // - Resource aliases (resources.) func (c *AdapterConfig) GetDefinedVariables() map[string]bool { vars := make(map[string]bool) diff --git a/internal/config_loader/types.go b/internal/config_loader/types.go index db21cc4..6dc1cd0 100644 --- a/internal/config_loader/types.go +++ b/internal/config_loader/types.go @@ -46,8 +46,8 @@ type KubernetesConfig struct { APIVersion string `yaml:"apiVersion"` } -// Parameter represents a static parameter extraction configuration. -// Parameters are inputs extracted from external sources (event data, env vars). +// Parameter represents a parameter extraction configuration. +// Parameters are extracted from external sources (event data, env vars) using Source. type Parameter struct { Name string `yaml:"name"` Source string `yaml:"source,omitempty"` @@ -101,6 +101,7 @@ type Precondition struct { Capture []CaptureField `yaml:"capture,omitempty"` Conditions []Condition `yaml:"conditions,omitempty"` Expression string `yaml:"expression,omitempty"` + Log *LogAction `yaml:"log,omitempty"` } // APICall represents an API call configuration @@ -196,8 +197,15 @@ type PostConfig struct { // PostAction represents a post-processing action type PostAction struct { - Name string `yaml:"name"` - APICall *APICall `yaml:"apiCall,omitempty"` + Name string `yaml:"name"` + APICall *APICall `yaml:"apiCall,omitempty"` + Log *LogAction `yaml:"log,omitempty"` +} + +// LogAction represents a logging action that can be configured in the adapter config +type LogAction struct { + Message string `yaml:"message"` + Level string `yaml:"level,omitempty"` // debug, info, warning, error (default: info) } // ManifestRef represents a manifest reference diff --git a/internal/config_loader/validator_schema.go b/internal/config_loader/validator_schema.go index 7b670bb..09d0622 100644 --- a/internal/config_loader/validator_schema.go +++ 
b/internal/config_loader/validator_schema.go @@ -4,11 +4,18 @@ import ( "fmt" "os" "path/filepath" + "regexp" "strings" "gopkg.in/yaml.v3" ) +// validResourceNameRegex validates resource names for CEL compatibility. +// Allows snake_case (my_resource) and camelCase (myResource). +// Must start with lowercase letter, can contain letters, numbers, underscores. +// Hyphens (kebab-case) are NOT allowed as they conflict with CEL's minus operator. +var validResourceNameRegex = regexp.MustCompile(`^[a-z][a-zA-Z0-9_]*$`) + // ----------------------------------------------------------------------------- // SchemaValidator // ----------------------------------------------------------------------------- @@ -142,6 +149,8 @@ func (v *SchemaValidator) validatePreconditions() error { } func (v *SchemaValidator) validateResources() error { + seen := make(map[string]bool) + for i, resource := range v.config.Spec.Resources { path := fmt.Sprintf("%s.%s[%d]", FieldSpec, FieldResources, i) @@ -149,6 +158,18 @@ func (v *SchemaValidator) validateResources() error { return fmt.Errorf("%s.%s is required", path, FieldName) } + // Validate resource name format for CEL compatibility + // Allows snake_case and camelCase, but NOT kebab-case (hyphens conflict with CEL minus operator) + if !validResourceNameRegex.MatchString(resource.Name) { + return fmt.Errorf("%s.%s %q: must start with lowercase letter and contain only letters, numbers, underscores (no hyphens)", path, FieldName, resource.Name) + } + + // Check for duplicate resource names + if seen[resource.Name] { + return fmt.Errorf("%s.%s %q: duplicate resource name", path, FieldName, resource.Name) + } + seen[resource.Name] = true + if resource.Manifest == nil { return fmt.Errorf("%s (%s): %s is required", path, resource.Name, FieldManifest) } diff --git a/internal/config_loader/validator_test.go b/internal/config_loader/validator_test.go index 9221cb1..12d943d 100644 --- a/internal/config_loader/validator_test.go +++ 
b/internal/config_loader/validator_test.go @@ -3,10 +3,9 @@ package config_loader import ( "testing" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" ) func TestValidateConditionOperators(t *testing.T) { diff --git a/internal/criteria/README.md b/internal/criteria/README.md index fa0bc3b..770277b 100644 --- a/internal/criteria/README.md +++ b/internal/criteria/README.md @@ -41,7 +41,7 @@ ctx.Set("provider", "aws") ctx.Set("nodeCount", 5) // Create evaluator -evaluator := criteria.NewEvaluator(ctx) +evaluator := criteria.NewEvaluator(ctx, log) // Evaluate a single condition result, err := evaluator.EvaluateCondition( @@ -132,7 +132,7 @@ ctx.Set("clusterPhase", "Ready") ctx.Set("cloudProvider", "aws") ctx.Set("vpcId", "vpc-12345") -evaluator := criteria.NewEvaluator(ctx) +evaluator := criteria.NewEvaluator(ctx, log) // Validate cluster is in correct phase phaseValid, _ := evaluator.EvaluateCondition( @@ -179,7 +179,7 @@ ctx.Set("resources", map[string]interface{}{ }, }) -evaluator := criteria.NewEvaluator(ctx) +evaluator := criteria.NewEvaluator(ctx, log) // Check namespace is active nsActive, _ := evaluator.EvaluateCondition( @@ -204,7 +204,7 @@ if nsActive && allReady { ```go ctx := criteria.NewEvaluationContext() -evaluator := criteria.NewEvaluator(ctx) +evaluator := criteria.NewEvaluator(ctx, log) // String contains ctx.Set("message", "Deployment ready and healthy") @@ -233,7 +233,7 @@ ctx.Set("nodeCount", 5) ctx.Set("minNodes", 1) ctx.Set("maxNodes", 10) -evaluator := criteria.NewEvaluator(ctx) +evaluator := criteria.NewEvaluator(ctx, log) // Check if within range aboveMin, _ := evaluator.EvaluateCondition( @@ -276,7 +276,7 @@ ctx.Set("cloudProvider", "aws") ctx.Set("vpcId", "vpc-12345") // Evaluate precondition conditions -evaluator := criteria.NewEvaluator(ctx) +evaluator := 
criteria.NewEvaluator(ctx, log) conditions := make([]criteria.ConditionDef, len(precond.Conditions)) for i, cond := range precond.Conditions { conditions[i] = criteria.ConditionDef{ @@ -306,7 +306,7 @@ The package provides descriptive error messages: ctx := criteria.NewEvaluationContext() ctx.Set("count", "not a number") -evaluator := criteria.NewEvaluator(ctx) +evaluator := criteria.NewEvaluator(ctx, log) result, err := evaluator.EvaluateCondition( "count", criteria.OperatorGreaterThan, diff --git a/internal/criteria/cel_evaluator.go b/internal/criteria/cel_evaluator.go index e5a9c8e..74350aa 100644 --- a/internal/criteria/cel_evaluator.go +++ b/internal/criteria/cel_evaluator.go @@ -6,16 +6,18 @@ import ( "strconv" "strings" - "github.com/golang/glog" "github.com/google/cel-go/cel" "github.com/google/cel-go/common/types" "github.com/google/cel-go/common/types/ref" + apperrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" ) // CELEvaluator evaluates CEL expressions against a context type CELEvaluator struct { env *cel.Env context *EvaluationContext + log logger.Logger } // CELResult contains the result of evaluating a CEL expression. 
@@ -27,8 +29,9 @@ type CELResult struct { // Matched indicates if the result is boolean true (for conditions) // Always false when Error is set Matched bool - // Type is the CEL type of the result ("error" when evaluation failed) - Type string + // ValueType is the CEL type of Value (e.g., "bool", "string", "int", "map", "list") + // Empty when evaluation failed + ValueType string // Expression is the original expression that was evaluated Expression string // Error indicates if evaluation failed (nil if successful) @@ -49,7 +52,7 @@ func (r *CELResult) IsSuccess() bool { } // NewCELEvaluator creates a new CEL evaluator with the given context -func NewCELEvaluator(ctx *EvaluationContext) (*CELEvaluator, error) { +func NewCELEvaluator(ctx *EvaluationContext, log logger.Logger) (*CELEvaluator, error) { if ctx == nil { ctx = NewEvaluationContext() } @@ -59,24 +62,27 @@ func NewCELEvaluator(ctx *EvaluationContext) (*CELEvaluator, error) { env, err := cel.NewEnv(options...) if err != nil { - return nil, fmt.Errorf("failed to create CEL environment: %w", err) + return nil, apperrors.NewCELEnvError("failed to initialize", err) } return &CELEvaluator{ env: env, context: ctx, + log: log, }, nil } // buildCELOptions creates CEL environment options from the context -// Variables are dynamically registered based on what's in ctx.Data +// Variables are dynamically registered based on what's in ctx.Data() func buildCELOptions(ctx *EvaluationContext) []cel.EnvOption { options := make([]cel.EnvOption, 0) // Enable optional types for optional chaining syntax (e.g., a.?b.?c) options = append(options, cel.OptionalTypes()) - for key, value := range ctx.Data { + // Get a snapshot of the data for thread safety + data := ctx.Data() + for key, value := range data { celType := inferCELType(value) options = append(options, cel.Variable(key, celType)) } @@ -110,148 +116,146 @@ func inferCELType(value interface{}) *cel.Type { } } -// Evaluate evaluates a CEL expression and returns the result. 
-// Returns an error if evaluation fails. Use EvaluateSafe for error-tolerant evaluation. -func (e *CELEvaluator) Evaluate(expression string) (*CELResult, error) { - result := e.EvaluateSafe(expression) - if result.Error != nil { - return nil, result.Error - } - return result, nil -} - -// EvaluateSafe evaluates a CEL expression and captures any errors in the result. -// This never returns an error - all errors are captured in CELResult.Error and CELResult.ErrorReason. -// Use this when you want to handle evaluation failures gracefully at a higher level. +// EvaluateSafe evaluates a CEL expression with safe handling for evaluation errors. +// +// Error handling strategy: +// - Parse errors: returned as error (fail fast - indicates bug in expression) +// - Program creation errors: returned as error (fail fast - indicates invalid expression) +// - Evaluation errors: captured in CELResult.Error (safe - data might not exist yet) // -// Common error reasons include: +// Use this when you expect that some fields might not exist or be null, and you want +// to handle those cases gracefully (e.g., treat as "not matched") rather than failing. 
+// +// Common evaluation error reasons captured in result: // - "field not found": when accessing a key that doesn't exist (e.g., data.missing.field) // - "null value access": when accessing a field on a null value // - "type mismatch": when operations are applied to incompatible types -func (e *CELEvaluator) EvaluateSafe(expression string) *CELResult { +func (e *CELEvaluator) EvaluateSafe(expression string) (*CELResult, error) { expression = strings.TrimSpace(expression) if expression == "" { return &CELResult{ Value: true, Matched: true, - Type: "bool", + ValueType: "bool", Expression: expression, - } + }, nil } - // Parse the expression + // Parse the expression - errors here indicate bugs in configuration ast, issues := e.env.Parse(expression) if issues != nil && issues.Err() != nil { - return &CELResult{ - Value: nil, - Matched: false, - Type: "error", - Expression: expression, - Error: fmt.Errorf("CEL parse error: %w", issues.Err()), - ErrorReason: fmt.Sprintf("parse error: %s", issues.Err()), - } + return nil, apperrors.NewCELParseError(expression, issues.Err()) } - // Type-check the expression (optional, may fail for dynamic types) - checked, issues := e.env.Check(ast) - if issues != nil && issues.Err() != nil { - glog.V(2).Infof("CEL type check failed for expression %q (using parsed AST): %v", expression, issues.Err()) - // Use parsed AST if type checking fails (common with dynamic types) - checked = ast + // Safety check: ensure AST is valid after parse + if ast == nil { + return nil, apperrors.NewCELParseError(expression, nil) } - // Create the program - prg, err := e.env.Program(checked) + // Create the program directly from parsed AST + // Skip type-check: we use DynType, so type errors are caught during evaluation + prg, err := e.env.Program(ast) if err != nil { - return &CELResult{ - Value: nil, - Matched: false, - Type: "error", - Expression: expression, - Error: fmt.Errorf("CEL program creation error: %w", err), - ErrorReason: fmt.Sprintf("program 
error: %s", err), - } + return nil, apperrors.NewCELProgramError(expression, err) } - // Evaluate the expression - out, _, err := prg.Eval(e.context.Data) + // Evaluate the expression - errors here are SAFE (data might not exist yet) + // Get a snapshot of the data for thread-safe evaluation + out, _, err := prg.Eval(e.context.Data()) if err != nil { - // Capture evaluation error - this includes "no such key" errors - // Error is logged at debug level and captured in result for executor to handle - errorReason := categorizeEvalError(err) - glog.V(2).Infof("CEL evaluation failed for %q: %s (%v)", expression, errorReason, err) + // Capture evaluation error in result - this is the "safe" part + // These errors are expected when data fields don't exist yet + if e.log != nil { + e.log.V(2).Infof("CEL evaluation failed for %q: %v", expression, err) + } return &CELResult{ Value: nil, Matched: false, - Type: "error", Expression: expression, - Error: fmt.Errorf("CEL evaluation error: %w", err), - ErrorReason: errorReason, - } + Error: apperrors.NewCELEvalError(expression, err), + ErrorReason: err.Error(), + }, nil // No error returned - evaluation errors are captured in result } // Convert result result := &CELResult{ Value: out.Value(), - Type: out.Type().TypeName(), + ValueType: out.Type().TypeName(), Expression: expression, } // Check if result is boolean true + // This is the most common use case for CEL expressions + // has("result.value") will result the value to bool if boolVal, ok := out.Value().(bool); ok { result.Matched = boolVal } else { // Non-boolean results are considered "matched" if not nil/empty + // This can used to dig values from the result + // For example, if the result is a map, you can use result.value.key to get the value of the key result.Matched = !isEmptyValue(out) } - return result + return result, nil } -// categorizeEvalError provides a human-readable error reason for common CEL evaluation errors -func categorizeEvalError(err error) string { 
- errStr := err.Error() - if strings.Contains(errStr, "no such key") { - return "field not found" - } - if strings.Contains(errStr, "no such attribute") { - return "attribute not found" +// EvaluateAs evaluates a CEL expression and returns the result as the specified type. +// This is a type-safe generic function that handles all type assertions properly. +// Returns an error if: +// - Parse/program error occurs (from EvaluateSafe) +// - Evaluation error occurs (captured in result.Error) +// - Type assertion fails (returns CELTypeMismatchError) +func EvaluateAs[T any](e *CELEvaluator, expression string) (T, error) { + var zero T + result, err := e.EvaluateSafe(expression) + if err != nil { + return zero, err } - if strings.Contains(errStr, "null") || strings.Contains(errStr, "nil") { - return "null value access" + if result.Error != nil { + return zero, result.Error } - if strings.Contains(errStr, "type") { - return "type mismatch" + + val, ok := result.Value.(T) + if !ok { + return zero, apperrors.NewCELTypeMismatchError(expression, + fmt.Sprintf("%T", zero), fmt.Sprintf("%T", result.Value)) } - return fmt.Sprintf("evaluation failed: %s", errStr) + return val, nil } -// EvaluateBool evaluates a CEL expression that should return a boolean +// EvaluateBool evaluates a CEL expression that should return a boolean. func (e *CELEvaluator) EvaluateBool(expression string) (bool, error) { - result, err := e.Evaluate(expression) - if err != nil { - return false, err - } + return EvaluateAs[bool](e, expression) +} - if boolVal, ok := result.Value.(bool); ok { - return boolVal, nil - } +// EvaluateString evaluates a CEL expression that should return a string. +func (e *CELEvaluator) EvaluateString(expression string) (string, error) { + return EvaluateAs[string](e, expression) +} - return result.Matched, nil +// EvaluateInt evaluates a CEL expression that should return an int64. 
+func (e *CELEvaluator) EvaluateInt(expression string) (int64, error) { + return EvaluateAs[int64](e, expression) } -// EvaluateString evaluates a CEL expression that should return a string -func (e *CELEvaluator) EvaluateString(expression string) (string, error) { - result, err := e.Evaluate(expression) - if err != nil { - return "", err - } +// EvaluateUint evaluates a CEL expression that should return a uint64. +func (e *CELEvaluator) EvaluateUint(expression string) (uint64, error) { + return EvaluateAs[uint64](e, expression) +} - if strVal, ok := result.Value.(string); ok { - return strVal, nil - } +// EvaluateFloat64 evaluates a CEL expression that should return a float64. +func (e *CELEvaluator) EvaluateFloat64(expression string) (float64, error) { + return EvaluateAs[float64](e, expression) +} + +// EvaluateArray evaluates a CEL expression that should return a slice. +func (e *CELEvaluator) EvaluateArray(expression string) ([]any, error) { + return EvaluateAs[[]any](e, expression) +} - return fmt.Sprintf("%v", result.Value), nil +// EvaluateMap evaluates a CEL expression that should return a map. 
+func (e *CELEvaluator) EvaluateMap(expression string) (map[string]any, error) { + return EvaluateAs[map[string]any](e, expression) } // isEmptyValue checks if a CEL value is empty/nil @@ -313,7 +317,7 @@ func ConditionToCEL(field, operator string, value interface{}) (string, error) { // For top-level variables, check not null and not empty string return fmt.Sprintf("(%s != null && %s != \"\")", field, field), nil default: - return "", fmt.Errorf("unsupported operator for CEL conversion: %s", operator) + return "", apperrors.NewCELUnsupportedOperatorError(operator) } } @@ -359,7 +363,7 @@ func formatCELValue(value interface{}) (string, error) { } return fmt.Sprintf("[%s]", strings.Join(items, ", ")), nil default: - return "", fmt.Errorf("unsupported type for CEL formatting: %T", value) + return "", apperrors.NewCELUnsupportedTypeError(fmt.Sprintf("%T", value)) } } } @@ -374,7 +378,7 @@ func ConditionsToCEL(conditions []ConditionDef) (string, error) { for i, cond := range conditions { expr, err := ConditionToCEL(cond.Field, string(cond.Operator), cond.Value) if err != nil { - return "", fmt.Errorf("failed to convert condition %d: %w", i, err) + return "", apperrors.NewCELConditionConversionError(i, err) } expressions[i] = "(" + expr + ")" } diff --git a/internal/criteria/cel_evaluator_test.go b/internal/criteria/cel_evaluator_test.go index 3b64d92..3a6093c 100644 --- a/internal/criteria/cel_evaluator_test.go +++ b/internal/criteria/cel_evaluator_test.go @@ -13,7 +13,7 @@ func TestNewCELEvaluator(t *testing.T) { ctx.Set("status", "Ready") ctx.Set("replicas", 3) - evaluator, err := NewCELEvaluator(ctx) + evaluator, err := NewCELEvaluator(ctx, nil) require.NoError(t, err) require.NotNil(t, evaluator) } @@ -25,7 +25,7 @@ func TestCELEvaluatorEvaluate(t *testing.T) { ctx.Set("provider", "aws") ctx.Set("enabled", true) - evaluator, err := NewCELEvaluator(ctx) + evaluator, err := NewCELEvaluator(ctx, nil) require.NoError(t, err) tests := []struct { @@ -98,12 +98,19 @@ func 
TestCELEvaluatorEvaluate(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result, err := evaluator.Evaluate(tt.expression) + result, err := evaluator.EvaluateSafe(tt.expression) if tt.wantErr { - assert.Error(t, err) + // Parse errors are returned as error, eval errors in result + if err != nil { + assert.Error(t, err) + return + } + // Evaluation error captured in result + assert.True(t, result.HasError()) return } require.NoError(t, err) + assert.False(t, result.HasError()) assert.Equal(t, tt.wantMatch, result.Matched) assert.Equal(t, tt.wantValue, result.Value) assert.Equal(t, tt.expression, result.Expression) @@ -122,17 +129,19 @@ func TestCELEvaluatorWithNestedData(t *testing.T) { }, }) - evaluator, err := NewCELEvaluator(ctx) + evaluator, err := NewCELEvaluator(ctx, nil) require.NoError(t, err) // Test nested field access - result, err := evaluator.Evaluate(`cluster.status.phase == "Ready"`) + result, err := evaluator.EvaluateSafe(`cluster.status.phase == "Ready"`) require.NoError(t, err) + assert.False(t, result.HasError()) assert.True(t, result.Matched) // Test nested numeric comparison - result, err = evaluator.Evaluate(`cluster.spec.replicas > 1`) + result, err = evaluator.EvaluateSafe(`cluster.spec.replicas > 1`) require.NoError(t, err) + assert.False(t, result.HasError()) assert.True(t, result.Matched) } @@ -145,11 +154,12 @@ func TestCELEvaluatorEvaluateSafe(t *testing.T) { }) ctx.Set("nullValue", nil) - evaluator, err := NewCELEvaluator(ctx) + evaluator, err := NewCELEvaluator(ctx, nil) require.NoError(t, err) t.Run("successful evaluation", func(t *testing.T) { - result := evaluator.EvaluateSafe(`cluster.status.phase == "Ready"`) + result, err := evaluator.EvaluateSafe(`cluster.status.phase == "Ready"`) + require.NoError(t, err, "EvaluateSafe should not return error for valid expression") assert.True(t, result.IsSuccess()) assert.False(t, result.HasError()) assert.True(t, result.Matched) @@ -157,47 +167,54 @@ func 
TestCELEvaluatorEvaluateSafe(t *testing.T) { assert.Empty(t, result.ErrorReason) }) - t.Run("missing field returns error in result", func(t *testing.T) { - result := evaluator.EvaluateSafe(`cluster.nonexistent.field == "test"`) + t.Run("missing field returns error in result (safe)", func(t *testing.T) { + // Evaluation errors (missing fields) are captured in result, NOT returned as error + result, err := evaluator.EvaluateSafe(`cluster.nonexistent.field == "test"`) + require.NoError(t, err, "EvaluateSafe should not return error for evaluation errors") assert.True(t, result.HasError()) assert.False(t, result.IsSuccess()) assert.False(t, result.Matched) assert.NotNil(t, result.Error) - assert.Contains(t, result.ErrorReason, "not found") + assert.Contains(t, result.ErrorReason, "no such key") }) - t.Run("access field on null returns error in result", func(t *testing.T) { - result := evaluator.EvaluateSafe(`nullValue.field == "test"`) + t.Run("access field on null returns error in result (safe)", func(t *testing.T) { + result, err := evaluator.EvaluateSafe(`nullValue.field == "test"`) + require.NoError(t, err, "EvaluateSafe should not return error for null access") assert.True(t, result.HasError()) assert.False(t, result.Matched) assert.NotNil(t, result.Error) }) - t.Run("has() on missing intermediate key returns error", func(t *testing.T) { + t.Run("has() on missing intermediate key returns error in result", func(t *testing.T) { // Without preprocessing, has(cluster.nonexistent.field) errors // because cluster.nonexistent doesn't exist - result := evaluator.EvaluateSafe(`has(cluster.nonexistent.field)`) + result, err := evaluator.EvaluateSafe(`has(cluster.nonexistent.field)`) + require.NoError(t, err) assert.True(t, result.HasError()) assert.False(t, result.Matched) - assert.Contains(t, result.ErrorReason, "not found") + assert.Contains(t, result.ErrorReason, "no such key") }) t.Run("has() on existing intermediate key returns false for missing leaf", func(t 
*testing.T) { // has(cluster.status.missing) - cluster.status exists, but missing doesn't - result := evaluator.EvaluateSafe(`has(cluster.status.missing)`) + result, err := evaluator.EvaluateSafe(`has(cluster.status.missing)`) + require.NoError(t, err) assert.True(t, result.IsSuccess()) assert.False(t, result.Matched) // false because field doesn't exist assert.Nil(t, result.Error) }) t.Run("empty expression returns true", func(t *testing.T) { - result := evaluator.EvaluateSafe("") + result, err := evaluator.EvaluateSafe("") + require.NoError(t, err) assert.True(t, result.IsSuccess()) assert.True(t, result.Matched) }) t.Run("error result can be used for conditional logic", func(t *testing.T) { - result := evaluator.EvaluateSafe(`cluster.missing.path == "value"`) + result, err := evaluator.EvaluateSafe(`cluster.missing.path == "value"`) + require.NoError(t, err, "Evaluation errors should be captured, not returned") // You can use the result for conditional logic var finalValue interface{} @@ -214,13 +231,21 @@ func TestCELEvaluatorEvaluateSafe(t *testing.T) { assert.Nil(t, finalValue) assert.NotEmpty(t, reason) }) + + t.Run("parse error returns actual error (not safe)", func(t *testing.T) { + // Parse errors should be returned as actual errors - they indicate bugs + result, err := evaluator.EvaluateSafe(`invalid syntax ===`) + assert.Error(t, err, "Parse errors should be returned as errors") + assert.Nil(t, result) + assert.Contains(t, err.Error(), "parse error") + }) } func TestCELEvaluatorEvaluateBool(t *testing.T) { ctx := NewEvaluationContext() ctx.Set("status", "Ready") - evaluator, err := NewCELEvaluator(ctx) + evaluator, err := NewCELEvaluator(ctx, nil) require.NoError(t, err) // True result @@ -239,7 +264,7 @@ func TestCELEvaluatorEvaluateString(t *testing.T) { ctx.Set("status", "Ready") ctx.Set("name", "test-cluster") - evaluator, err := NewCELEvaluator(ctx) + evaluator, err := NewCELEvaluator(ctx, nil) require.NoError(t, err) // String result @@ -415,7 
+440,7 @@ func TestEvaluatorCELIntegration(t *testing.T) { ctx.Set("replicas", 3) ctx.Set("provider", "aws") - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // Test EvaluateCEL result, err := evaluator.EvaluateCEL(`status == "Ready" && replicas > 1`) @@ -444,7 +469,7 @@ func TestEvaluatorCELIntegration(t *testing.T) { func TestGetCELExpression(t *testing.T) { ctx := NewEvaluationContext() - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // Single condition expr, err := evaluator.GetCELExpression("status", OperatorEquals, "Ready") @@ -507,7 +532,7 @@ func TestEvaluateSafeErrorHandling(t *testing.T) { }, }) - evaluator, err := NewCELEvaluator(ctx) + evaluator, err := NewCELEvaluator(ctx, nil) require.NoError(t, err) tests := []struct { @@ -527,13 +552,13 @@ func TestEvaluateSafeErrorHandling(t *testing.T) { name: "missing leaf field", expression: `data.level1.level2.missing == "test"`, wantSuccess: false, - wantReason: "not found", + wantReason: "no such key", }, { name: "missing intermediate field", expression: `data.level1.nonexistent.value == "test"`, wantSuccess: false, - wantReason: "not found", + wantReason: "no such key", }, { name: "has() on existing path", @@ -551,13 +576,14 @@ func TestEvaluateSafeErrorHandling(t *testing.T) { name: "has() on missing intermediate", expression: `has(data.level1.nonexistent.value)`, wantSuccess: false, - wantReason: "not found", + wantReason: "no such key", }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := evaluator.EvaluateSafe(tt.expression) + result, err := evaluator.EvaluateSafe(tt.expression) + require.NoError(t, err, "EvaluateSafe should not return parse/program errors for valid expressions") if tt.wantSuccess { assert.True(t, result.IsSuccess(), "expected success but got error: %v", result.Error) diff --git a/internal/criteria/evaluator.go b/internal/criteria/evaluator.go index b3629c9..9dc285e 100644 --- a/internal/criteria/evaluator.go +++ 
b/internal/criteria/evaluator.go @@ -6,6 +6,8 @@ import ( "reflect" "strings" "sync" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" ) // EvaluationResult contains the result of evaluating a condition @@ -37,30 +39,47 @@ type ConditionsResult struct { // Evaluator evaluates criteria against an evaluation context type Evaluator struct { context *EvaluationContext + log logger.Logger // Lazily cached CEL evaluator for repeated CEL evaluations - celEval *CELEvaluator - celEvalOnce sync.Once - celEvalErr error + // Recreated when context version changes + celEval *CELEvaluator + celEvalVersion int64 // Track which context version the CEL eval was created with + mu sync.Mutex } // NewEvaluator creates a new criteria evaluator -func NewEvaluator(ctx *EvaluationContext) *Evaluator { +func NewEvaluator(ctx *EvaluationContext, log logger.Logger) *Evaluator { if ctx == nil { ctx = NewEvaluationContext() } return &Evaluator{ context: ctx, + log: log, } } // getCELEvaluator returns a cached CEL evaluator, creating it lazily on first use. -// This avoids creating a new CEL environment for each evaluation. +// If the context has been modified (version changed), the CEL evaluator is recreated +// to ensure the CEL environment stays in sync with the context data. +// This prevents "undeclared reference" errors when variables are added after first evaluation. 
func (e *Evaluator) getCELEvaluator() (*CELEvaluator, error) { - e.celEvalOnce.Do(func() { - e.celEval, e.celEvalErr = NewCELEvaluator(e.context) - }) - return e.celEval, e.celEvalErr + e.mu.Lock() + defer e.mu.Unlock() + + currentVersion := e.context.Version() + + // Recreate CEL evaluator if context changed or not yet created + if e.celEval == nil || e.celEvalVersion != currentVersion { + celEval, err := NewCELEvaluator(e.context, e.log) + if err != nil { + return nil, err + } + e.celEval = celEval + e.celEvalVersion = currentVersion + } + + return e.celEval, nil } // GetField extracts a field value from the context using dot notation @@ -128,36 +147,21 @@ func (e *Evaluator) EvaluateConditionWithResult(field string, operator Operator, // Evaluate based on operator var matched bool - switch operator { - case OperatorEquals: - matched, err = evaluateEquals(fieldValue, value) - case OperatorNotEquals: - matched, err = evaluateEquals(fieldValue, value) - matched = !matched - case OperatorIn: - matched, err = evaluateIn(fieldValue, value) - case OperatorNotIn: - matched, err = evaluateIn(fieldValue, value) - matched = !matched - case OperatorContains: - matched, err = evaluateContains(fieldValue, value) - case OperatorGreaterThan: - matched, err = evaluateGreaterThan(fieldValue, value) - case OperatorLessThan: - matched, err = evaluateLessThan(fieldValue, value) - case OperatorExists: + if operator == OperatorExists { + // Exists is special - only checks fieldValue, no expected value matched = evaluateExists(fieldValue) - default: + } else if evalFn, ok := operatorFuncs[operator]; ok { + matched, err = evalFn(fieldValue, value) + if err != nil { + return nil, err + } + } else { return nil, &EvaluationError{ Field: field, Message: fmt.Sprintf("unsupported operator: %s", operator), } } - if err != nil { - return nil, err - } - result.Matched = matched return result, nil } @@ -231,31 +235,35 @@ func (e *Evaluator) ExtractFieldsOrDefault(fields map[string]interface{}) 
map[st return extracted } -// EvaluateCEL evaluates a CEL expression against the current context -func (e *Evaluator) EvaluateCEL(expression string) (*CELResult, error) { +// withCELEvaluator gets the CEL evaluator and applies a function to it +func withCELEvaluator[T any](e *Evaluator, fn func(*CELEvaluator) (T, error)) (T, error) { + var zero T celEval, err := e.getCELEvaluator() if err != nil { - return nil, err + return zero, err } - return celEval.Evaluate(expression) + return fn(celEval) +} + +// EvaluateCEL evaluates a CEL expression against the current context +func (e *Evaluator) EvaluateCEL(expression string) (*CELResult, error) { + return withCELEvaluator(e, func(c *CELEvaluator) (*CELResult, error) { + return c.EvaluateSafe(expression) + }) } // EvaluateCELBool evaluates a CEL expression that returns a boolean func (e *Evaluator) EvaluateCELBool(expression string) (bool, error) { - celEval, err := e.getCELEvaluator() - if err != nil { - return false, err - } - return celEval.EvaluateBool(expression) + return withCELEvaluator(e, func(c *CELEvaluator) (bool, error) { + return c.EvaluateBool(expression) + }) } // EvaluateCELString evaluates a CEL expression that returns a string func (e *Evaluator) EvaluateCELString(expression string) (string, error) { - celEval, err := e.getCELEvaluator() - if err != nil { - return "", err - } - return celEval.EvaluateString(expression) + return withCELEvaluator(e, func(c *CELEvaluator) (string, error) { + return c.EvaluateString(expression) + }) } // EvaluateConditionAsCEL converts a condition to CEL and evaluates it @@ -286,7 +294,6 @@ func (e *Evaluator) GetCELExpressionForConditions(conditions []ConditionDef) (st return ConditionsToCEL(conditions) } -// ConditionDef represents a condition definition // ConditionDef defines a condition to evaluate type ConditionDef struct { Field string @@ -310,6 +317,28 @@ func (c ConditionDefJSON) ToConditionDef() ConditionDef { } } +// evalFunc is a function type for operator 
evaluation +type evalFunc func(fieldValue, expected interface{}) (bool, error) + +// operatorFuncs maps operators to their evaluation functions +var operatorFuncs = map[Operator]evalFunc{ + OperatorEquals: evaluateEquals, + OperatorNotEquals: negate(evaluateEquals), + OperatorIn: evaluateIn, + OperatorNotIn: negate(evaluateIn), + OperatorContains: evaluateContains, + OperatorGreaterThan: evaluateGreaterThan, + OperatorLessThan: evaluateLessThan, +} + +// negate wraps an evalFunc to return the opposite result +func negate(fn evalFunc) evalFunc { + return func(a, b interface{}) (bool, error) { + result, err := fn(a, b) + return !result, err + } +} + // evaluateEquals checks if two values are equal func evaluateEquals(fieldValue, expectedValue interface{}) (bool, error) { // Handle nil cases @@ -477,31 +506,14 @@ func compareNumbers(a, b interface{}, compare func(float64, float64) bool) (bool // toFloat64 converts various numeric types to float64 func toFloat64(value interface{}) (float64, error) { - switch v := value.(type) { - case float64: - return v, nil - case float32: - return float64(v), nil - case int: - return float64(v), nil - case int8: - return float64(v), nil - case int16: - return float64(v), nil - case int32: - return float64(v), nil - case int64: - return float64(v), nil - case uint: - return float64(v), nil - case uint8: - return float64(v), nil - case uint16: - return float64(v), nil - case uint32: - return float64(v), nil - case uint64: - return float64(v), nil + v := reflect.ValueOf(value) + switch v.Kind() { + case reflect.Float32, reflect.Float64: + return v.Float(), nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(v.Uint()), nil default: return 0, fmt.Errorf("cannot convert %T to float64", value) } diff --git a/internal/criteria/evaluator_scenarios_test.go 
b/internal/criteria/evaluator_scenarios_test.go index 31e3355..5ed53f2 100644 --- a/internal/criteria/evaluator_scenarios_test.go +++ b/internal/criteria/evaluator_scenarios_test.go @@ -42,7 +42,7 @@ func TestRealWorldScenario(t *testing.T) { ctx.Set("vpcId", "vpc-12345") ctx.Set("nodeCount", 5) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // Test precondition conditions from the template t.Run("clusterPhase in valid phases", func(t *testing.T) { @@ -116,7 +116,7 @@ func TestResourceStatusEvaluation(t *testing.T) { ctx.Set("resources", resources) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) t.Run("namespace is active", func(t *testing.T) { result, err := evaluator.EvaluateCondition( @@ -133,7 +133,7 @@ func TestResourceStatusEvaluation(t *testing.T) { localCtx := NewEvaluationContext() localCtx.Set("replicas", 3) localCtx.Set("readyReplicas", 3) - localEvaluator := NewEvaluator(localCtx) + localEvaluator := NewEvaluator(localCtx, nil) result, err := localEvaluator.EvaluateCondition( "replicas", @@ -166,7 +166,7 @@ func TestComplexNestedConditions(t *testing.T) { }, }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) t.Run("adapter execution successful", func(t *testing.T) { result, err := evaluator.EvaluateCondition( @@ -206,7 +206,7 @@ func TestMapKeyContainment(t *testing.T) { "owner": "team-a", }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) t.Run("map contains key - found", func(t *testing.T) { result, err := evaluator.EvaluateCondition( @@ -257,7 +257,7 @@ func TestTerminatingClusterScenario(t *testing.T) { ctx.Set("cloudProvider", "aws") ctx.Set("vpcId", "vpc-12345") - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) t.Run("terminating cluster fails preconditions", func(t *testing.T) { // Cluster in "Terminating" phase should NOT be in allowed phases @@ -337,7 +337,7 @@ func TestNodeCountValidation(t *testing.T) { ctx.Set("minNodes", 
tt.minNodes) ctx.Set("maxNodes", tt.maxNodes) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // Check if nodeCount >= minNodes result1, err := evaluator.EvaluateCondition( diff --git a/internal/criteria/evaluator_test.go b/internal/criteria/evaluator_test.go index 1f6b5b1..dc37710 100644 --- a/internal/criteria/evaluator_test.go +++ b/internal/criteria/evaluator_test.go @@ -11,7 +11,7 @@ import ( func TestNewEvaluationContext(t *testing.T) { ctx := NewEvaluationContext() assert.NotNil(t, ctx) - assert.NotNil(t, ctx.Data) + assert.NotNil(t, ctx.Data()) } func TestEvaluationContextSetGet(t *testing.T) { @@ -110,6 +110,50 @@ func TestEvaluationContextMerge(t *testing.T) { assert.Equal(t, "value3", val) } +// TestEvaluationContextMergeConcurrent verifies that concurrent cross-merges +// don't cause deadlock. Previously, if goroutine A called ctx1.Merge(ctx2) while +// goroutine B called ctx2.Merge(ctx1), a deadlock could occur due to lock ordering. +// The fix snapshots other's data before acquiring the write lock. 
+func TestEvaluationContextMergeConcurrent(t *testing.T) { + ctx1 := NewEvaluationContext() + ctx2 := NewEvaluationContext() + + // Initialize with different data + ctx1.Set("from1", "value1") + ctx2.Set("from2", "value2") + + done := make(chan bool, 2) + + // Goroutine A: ctx1.Merge(ctx2) + go func() { + for i := 0; i < 100; i++ { + ctx1.Merge(ctx2) + } + done <- true + }() + + // Goroutine B: ctx2.Merge(ctx1) - would deadlock with old implementation + go func() { + for i := 0; i < 100; i++ { + ctx2.Merge(ctx1) + } + done <- true + }() + + // Wait for both goroutines (with timeout via test timeout) + <-done + <-done + + // Both contexts should have both keys + val1, ok1 := ctx1.Get("from2") + assert.True(t, ok1, "ctx1 should have from2 after merge") + assert.Equal(t, "value2", val1) + + val2, ok2 := ctx2.Get("from1") + assert.True(t, ok2, "ctx2 should have from1 after merge") + assert.Equal(t, "value1", val2) +} + func TestEvaluateEquals(t *testing.T) { tests := []struct { name string @@ -476,7 +520,7 @@ func TestEvaluatorEvaluateCondition(t *testing.T) { "phase": "Active", }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) tests := []struct { name string @@ -563,7 +607,7 @@ func TestEvaluatorEvaluateConditions(t *testing.T) { ctx.Set("cloudProvider", "aws") ctx.Set("vpcId", "vpc-12345") - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) tests := []struct { name string @@ -725,7 +769,7 @@ func TestEvaluationError(t *testing.T) { } func TestNewEvaluatorWithNilContext(t *testing.T) { - evaluator := NewEvaluator(nil) + evaluator := NewEvaluator(nil, nil) require.NotNil(t, evaluator) require.NotNil(t, evaluator.context) } @@ -741,7 +785,7 @@ func TestGetField(t *testing.T) { }, }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // Get existing field value, err := evaluator.GetField("cluster.metadata.name") @@ -766,7 +810,7 @@ func TestGetFieldOrDefault(t *testing.T) { }, }) - evaluator := 
NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // Get existing field value := evaluator.GetFieldOrDefault("cluster.metadata.name", "default") @@ -787,7 +831,7 @@ func TestEvaluateConditionWithResult(t *testing.T) { ctx.Set("replicas", 3) ctx.Set("provider", "aws") - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // Test equals - matched result, err := evaluator.EvaluateConditionWithResult("status", OperatorEquals, "Ready") @@ -823,7 +867,7 @@ func TestEvaluateConditionsWithResult(t *testing.T) { ctx.Set("replicas", 3) ctx.Set("provider", "aws") - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // All conditions pass conditions := []ConditionDef{ @@ -874,7 +918,7 @@ func TestExtractFields(t *testing.T) { }, }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // Extract multiple fields fields := []string{"cluster.metadata.name", "cluster.metadata.namespace", "cluster.status.phase"} @@ -900,7 +944,7 @@ func TestExtractFieldsSafe(t *testing.T) { "status": nil, // null value }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) fields := []string{ "cluster.metadata.name", // exists @@ -925,7 +969,7 @@ func TestExtractFieldsOrDefault(t *testing.T) { }, }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) fields := map[string]interface{}{ "cluster.metadata.name": "default-name", @@ -971,7 +1015,7 @@ func TestNullHandling(t *testing.T) { }, }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) t.Run("access field on null parent", func(t *testing.T) { // Accessing cluster.status.phase when status is null @@ -1025,7 +1069,7 @@ func TestDeepNullPath(t *testing.T) { }, }) - evaluator := NewEvaluator(ctx) + evaluator := NewEvaluator(ctx, nil) // a.b.c is null, so a.b.c.d.e.f should fail gracefully _, err := evaluator.GetField("a.b.c.d.e.f") diff --git a/internal/criteria/evaluator_version_test.go b/internal/criteria/evaluator_version_test.go new 
file mode 100644 index 0000000..3efb697 --- /dev/null +++ b/internal/criteria/evaluator_version_test.go @@ -0,0 +1,180 @@ +package criteria + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// TestContextVersionTracking verifies that the CEL evaluator is recreated +// when the context is modified after the first evaluation +func TestContextVersionTracking(t *testing.T) { + ctx := NewEvaluationContext() + ctx.Set("status", "Ready") + + evaluator := NewEvaluator(ctx, nil) + + // First CEL evaluation - creates CEL env with only "status" + result1, err1 := evaluator.EvaluateCEL("status == 'Ready'") + require.NoError(t, err1) + require.NotNil(t, result1) + assert.True(t, result1.Matched) + assert.True(t, result1.IsSuccess()) + + // Add new variable AFTER first evaluation + ctx.Set("replicas", 3) + + // Second CEL evaluation - CEL env should be recreated with "replicas" + // This would fail with "undeclared reference to 'replicas'" before the fix + result2, err2 := evaluator.EvaluateCEL("replicas > 0") + require.NoError(t, err2) + require.NotNil(t, result2) + assert.True(t, result2.Matched) + assert.True(t, result2.IsSuccess(), "Should recreate CEL env and recognize new variable") + + // Verify both old and new variables work + result3, err3 := evaluator.EvaluateCEL("status == 'Ready' && replicas == 3") + require.NoError(t, err3) + require.NotNil(t, result3) + assert.True(t, result3.Matched) + assert.True(t, result3.IsSuccess()) +} + +// TestSetVariablesFromMapVersionTracking verifies version tracking with SetVariablesFromMap +func TestSetVariablesFromMapVersionTracking(t *testing.T) { + ctx := NewEvaluationContext() + ctx.SetVariablesFromMap(map[string]interface{}{ + "env": "production", + }) + + evaluator := NewEvaluator(ctx, nil) + + // First evaluation + result1, err1 := evaluator.EvaluateCEL("env == 'production'") + require.NoError(t, err1) + require.NotNil(t, result1) + assert.True(t, result1.Matched) + + 
// Add more variables using SetVariablesFromMap + ctx.SetVariablesFromMap(map[string]interface{}{ + "region": "us-west-2", + "replicas": 5, + }) + + // Should be able to use new variables + result2, err2 := evaluator.EvaluateCEL("region == 'us-west-2' && replicas == 5") + require.NoError(t, err2) + require.NotNil(t, result2) + assert.True(t, result2.Matched) + assert.True(t, result2.IsSuccess(), "Should recognize variables added via SetVariablesFromMap") +} + +// TestMergeVersionTracking verifies version tracking with Merge +func TestMergeVersionTracking(t *testing.T) { + ctx1 := NewEvaluationContext() + ctx1.Set("a", 1) + + evaluator := NewEvaluator(ctx1, nil) + + // First evaluation + result1, err1 := evaluator.EvaluateCEL("a == 1") + require.NoError(t, err1) + require.NotNil(t, result1) + assert.True(t, result1.Matched) + + // Merge another context + ctx2 := NewEvaluationContext() + ctx2.Set("b", 2) + ctx2.Set("c", 3) + ctx1.Merge(ctx2) + + // Should be able to use merged variables + result2, err2 := evaluator.EvaluateCEL("a == 1 && b == 2 && c == 3") + require.NoError(t, err2) + require.NotNil(t, result2) + assert.True(t, result2.Matched) + assert.True(t, result2.IsSuccess(), "Should recognize variables from merged context") +} + +// TestVersionIncrements verifies that version increments correctly +func TestVersionIncrements(t *testing.T) { + ctx := NewEvaluationContext() + assert.Equal(t, int64(0), ctx.Version(), "Initial version should be 0") + + ctx.Set("a", 1) + assert.Equal(t, int64(1), ctx.Version(), "Version should increment after Set with new key") + + // Same value should NOT increment version + ctx.Set("a", 1) + assert.Equal(t, int64(1), ctx.Version(), "Version should NOT increment when setting same value") + + // Different value SHOULD increment version + ctx.Set("a", 100) + assert.Equal(t, int64(2), ctx.Version(), "Version should increment when value changes") + + ctx.SetVariablesFromMap(map[string]interface{}{"b": 2, "c": 3}) + assert.Equal(t, 
int64(3), ctx.Version(), "Version should increment after SetVariablesFromMap with new keys") + + // Same values should NOT increment + ctx.SetVariablesFromMap(map[string]interface{}{"b": 2, "c": 3}) + assert.Equal(t, int64(3), ctx.Version(), "Version should NOT increment when SetVariablesFromMap has same values") + + ctx2 := NewEvaluationContext() + ctx2.Set("d", 4) + ctx.Merge(ctx2) + assert.Equal(t, int64(4), ctx.Version(), "Version should increment after Merge with new data") + + // Merge with same data should NOT increment + ctx3 := NewEvaluationContext() + ctx3.Set("d", 4) + ctx.Merge(ctx3) + assert.Equal(t, int64(4), ctx.Version(), "Version should NOT increment after Merge with same data") + + // New keys should still increment + ctx.Set("e", 5) + ctx.Set("f", 6) + assert.Equal(t, int64(6), ctx.Version(), "Version should increment for each new Set") +} + +// TestNoVersionChangeNoRecreate verifies CEL evaluator is not recreated unnecessarily +func TestNoVersionChangeNoRecreate(t *testing.T) { + ctx := NewEvaluationContext() + ctx.Set("status", "Ready") + + evaluator := NewEvaluator(ctx, nil) + + // First evaluation + result1, err1 := evaluator.EvaluateCEL("status == 'Ready'") + require.NoError(t, err1) + require.NotNil(t, result1) + assert.True(t, result1.Matched) + + // Get the CEL evaluator pointer + celEval1, err := evaluator.getCELEvaluator() + require.NoError(t, err) + + // Second evaluation WITHOUT context change + result2, err2 := evaluator.EvaluateCEL("status == 'Ready'") + require.NoError(t, err2) + require.NotNil(t, result2) + assert.True(t, result2.Matched) + + // Get the CEL evaluator pointer again + celEval2, err := evaluator.getCELEvaluator() + require.NoError(t, err) + + // Should be the same instance (not recreated) + assert.Same(t, celEval1, celEval2, "CEL evaluator should not be recreated when context unchanged") + + // Now modify context + ctx.Set("replicas", 3) + + // Get the CEL evaluator pointer after context change + celEval3, err := 
evaluator.getCELEvaluator() + require.NoError(t, err) + + // Should be a different instance (recreated) + assert.NotSame(t, celEval1, celEval3, "CEL evaluator should be recreated when context changes") +} + diff --git a/internal/criteria/types.go b/internal/criteria/types.go index b9e14b7..dbbad60 100644 --- a/internal/criteria/types.go +++ b/internal/criteria/types.go @@ -2,6 +2,8 @@ package criteria import ( "fmt" + "reflect" + "sync" ) // Operator represents a comparison operator @@ -57,43 +59,140 @@ func OperatorStrings() []string { return result } -// EvaluationContext holds the data available for criteria evaluation +// EvaluationContext holds the data available for criteria evaluation. +// It is safe for concurrent use by multiple goroutines. type EvaluationContext struct { - // Data contains all variables available for evaluation - Data map[string]interface{} + // data contains all variables available for evaluation + data map[string]interface{} + // version tracks modifications to detect when CEL evaluator needs recreation + // This ensures the CEL environment stays in sync with the context data + version int64 + // mu protects concurrent access to data and version + mu sync.RWMutex } // NewEvaluationContext creates a new evaluation context func NewEvaluationContext() *EvaluationContext { return &EvaluationContext{ - Data: make(map[string]interface{}), + data: make(map[string]interface{}), + version: 0, } } -// Set sets a variable in the context +// Version returns the current version of the context. +// The version increments with each modification (Set, SetVariablesFromMap, Merge). +func (c *EvaluationContext) Version() int64 { + c.mu.RLock() + defer c.mu.RUnlock() + return c.version +} + +// Set sets a variable in the context. +// Only increments version if the value actually changes (optimization to avoid unnecessary CEL env recreation). +// This method is safe for concurrent use. 
func (c *EvaluationContext) Set(key string, value interface{}) { - c.Data[key] = value + c.mu.Lock() + defer c.mu.Unlock() + + // Check if value actually changed + if existing, ok := c.data[key]; ok && reflect.DeepEqual(existing, value) { + return // No change, no version increment + } + + c.data[key] = value + c.version++ } -// Get retrieves a variable from the context +// Get retrieves a variable from the context. +// This method is safe for concurrent use. func (c *EvaluationContext) Get(key string) (interface{}, bool) { - val, ok := c.Data[key] + c.mu.RLock() + defer c.mu.RUnlock() + val, ok := c.data[key] return val, ok } -// GetNestedField retrieves a nested field using dot notation (e.g., "status.phase") +// GetNestedField retrieves a nested field using dot notation (e.g., "status.phase"). +// This method is safe for concurrent use. func (c *EvaluationContext) GetNestedField(path string) (interface{}, error) { - return getNestedField(c.Data, path) + c.mu.RLock() + defer c.mu.RUnlock() + return getNestedField(c.data, path) } -// Merge merges another context into this one +// Merge merges another context into this one. +// Only increments version if any value actually changes. +// This method is safe for concurrent use. +// +// To avoid deadlock when two goroutines call ctx1.Merge(ctx2) and ctx2.Merge(ctx1) +// simultaneously, we first snapshot the other context's data while holding only +// its read lock, then release it before acquiring our write lock. 
func (c *EvaluationContext) Merge(other *EvaluationContext) { if other == nil { return } - for k, v := range other.Data { - c.Data[k] = v + + // Step 1: Snapshot other's data while holding only its read lock + other.mu.RLock() + otherSnapshot := make(map[string]interface{}, len(other.data)) + for k, v := range other.data { + otherSnapshot[k] = v + } + other.mu.RUnlock() + + // Step 2: Now acquire our write lock and merge the snapshot + // No deadlock possible since we don't hold other's lock anymore + c.mu.Lock() + defer c.mu.Unlock() + + changed := false + for k, v := range otherSnapshot { + if existing, ok := c.data[k]; !ok || !reflect.DeepEqual(existing, v) { + c.data[k] = v + changed = true + } + } + + if changed { + c.version++ + } +} + +// SetVariablesFromMap sets all key-value pairs from the provided map as evaluation variables. +// Only increments version if any value actually changes. +// This method is safe for concurrent use. +func (c *EvaluationContext) SetVariablesFromMap(data map[string]interface{}) { + if data == nil { + return + } + c.mu.Lock() + defer c.mu.Unlock() + + changed := false + for k, v := range data { + if existing, ok := c.data[k]; !ok || !reflect.DeepEqual(existing, v) { + c.data[k] = v + changed = true + } + } + + if changed { + c.version++ + } +} + +// Data returns a copy of the internal data map. +// This is used by CEL evaluator for evaluation. +// Returns a shallow copy to prevent external modification. 
+func (c *EvaluationContext) Data() map[string]interface{} { + c.mu.RLock() + defer c.mu.RUnlock() + // Return a copy to prevent race conditions during CEL evaluation + copy := make(map[string]interface{}, len(c.data)) + for k, v := range c.data { + copy[k] = v } + return copy } // EvaluationError represents an error during criteria evaluation diff --git a/internal/executor/README.md b/internal/executor/README.md new file mode 100644 index 0000000..9f05c30 --- /dev/null +++ b/internal/executor/README.md @@ -0,0 +1,451 @@ +# Executor Package + +The `executor` package is the core event processing engine for the HyperFleet Adapter. It orchestrates the execution of CloudEvents according to the adapter configuration, coordinating parameter extraction, precondition evaluation, Kubernetes resource management, and post-action execution. + +## Key Concepts + +### Execution Status vs Business Outcomes + +The executor separates **process execution status** from **business outcomes**: + +- **Process Execution Status**: Did the adapter execute successfully? (`success` or `failed`) + - `success`: Adapter ran without process execution errors + - `failed`: Process execution error occurred (API timeout, K8s error, parse error, etc.) + +- **Business Outcomes**: What did the adapter decide to do? + - Resources executed: Preconditions met, resources created/updated + - Resources skipped: Preconditions not met (valid business decision) + +**Important**: Precondition not met is a **successful execution** with resources skipped. It's not a failure! + +## Overview + +The executor implements a four-phase execution pipeline: + +``` +┌──────────────────────────────────────────────────────────────────────┐ +│ Event Processing Pipeline │ +├──────────────────────────────────────────────────────────────────────┤ +│ │ +│ CloudEvent ──► Phase 1 ──► Phase 2 ──► Phase 3 ──► Phase 4 ──► Done │ +│ Extract Precond. Resources Post-Act. │ +│ Params Eval. 
Create Execute │ +│ │ +└──────────────────────────────────────────────────────────────────────┘ +``` + +## Components + +### Main Components + +| Component | File | Description | +|-----------|------|-------------| +| `Executor` | `executor.go` | Main orchestrator that coordinates all phases | +| `ParamExtractor` | `param_extractor.go` | Extracts parameters from events and environment | +| `PreconditionExecutor` | `precondition_executor.go` | Evaluates preconditions with API calls and CEL | +| `ResourceExecutor` | `resource_executor.go` | Creates/updates Kubernetes resources | +| `PostActionExecutor` | `post_action_executor.go` | Executes post-processing actions | + +### Type Definitions + +| Type | Description | +|------|-------------| +| `ExecutionResult` | Contains the result of processing an event | +| `PreconditionResult` | Result of a single precondition evaluation | +| `ResourceResult` | Result of a single resource operation | +| `PostActionResult` | Result of a single post-action execution | +| `ExecutionContext` | Process execution context during execution | + +## Usage + +### Basic Usage + +
+Click to see basic usage example + +```go +import ( + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" +) + +// Create executor using builder +exec, err := executor.NewBuilder(). + WithAdapterConfig(adapterConfig). + WithAPIClient(apiClient). + WithK8sClient(k8sClient). + WithLogger(log). + Build() +if err != nil { + return err +} + +// Create handler for broker subscription +handler := exec.CreateHandler() + +// Or execute directly +result := exec.Execute(ctx, cloudEvent) +if result.Status == executor.StatusFailed { + log.Errorf("Execution failed: %v", result.Error) +} else if result.ResourcesSkipped { + log.Infof("Execution succeeded, resources skipped: %s", result.SkipReason) +} else { + log.Infof("Execution succeeded") +} +``` + +
+ +### Mock K8s Client for Testing + +For unit tests, use a mock K8s client implementation instead of a real Kubernetes cluster: + +
+Click to see mock K8s client example + +```go +// Create a mock K8s client that implements k8s_client.K8sClient interface +mockK8s := &mockK8sClient{ + // Configure mock responses as needed +} + +exec, err := executor.NewBuilder(). + WithAdapterConfig(adapterConfig). + WithAPIClient(apiClient). + WithK8sClient(mockK8s). // Use mock instead of real client + WithLogger(log). + Build() +``` + +
+ +## Execution Phases + +### Phase 1: Parameter Extraction + +Extracts parameters from various sources: + +- **Environment Variables**: `source: "env.VARIABLE_NAME"` +- **Event Data**: `source: "event.field.path"` +- **Secrets**: `source: "secret.namespace.name.key"` (requires K8s client) +- **ConfigMaps**: `source: "configmap.namespace.name.key"` (requires K8s client) + +
+Parameter extraction example + +```yaml +params: + - name: "clusterId" + source: "event.cluster_id" + type: "string" + required: true + - name: "apiToken" + source: "env.API_TOKEN" + required: true +``` + +
+ +### Phase 2: Precondition Evaluation + +Executes preconditions with optional API calls and condition evaluation: + +
+Precondition with API call example + +```yaml +preconditions: + - name: "checkClusterStatus" + apiCall: + method: "GET" + url: "{{ .apiBaseUrl }}/clusters/{{ .clusterId }}" + capture: + - name: "clusterPhase" + field: "status.phase" + conditions: + - field: "clusterPhase" + operator: "in" + value: ["Ready", "Provisioning"] +``` + +
+ +#### Supported Condition Operators + +| Operator | Description | +|----------|-------------| +| `equals` | Exact equality | +| `notEquals` | Not equal | +| `in` | Value in list | +| `notIn` | Value not in list | +| `contains` | String/array contains | +| `greaterThan` | Numeric comparison | +| `lessThan` | Numeric comparison | +| `exists` | Field exists and is not empty | + +#### CEL Expressions + +For complex conditions, use CEL expressions: + +
+CEL expression example + +```yaml +preconditions: + - name: "complexCheck" + expression: | + clusterPhase == "Ready" && nodeCount >= 3 +``` + +
+ +### Phase 3: Resource Management + +Creates or updates Kubernetes resources from manifests: + +
+Resource management example + +```yaml +resources: + - name: "clusterNamespace" + manifest: + apiVersion: v1 + kind: Namespace + metadata: + name: "cluster-{{ .clusterId }}" + discovery: + byName: "cluster-{{ .clusterId }}" + + - name: "externalTemplate" + manifest: + ref: "templates/deployment.yaml" + discovery: + namespace: "cluster-{{ .clusterId }}" + bySelectors: + labelSelector: + app: "myapp" +``` + +
+ +#### Resource Operations + +| Operation | When | Description | +|-----------|------|-------------| +| `create` | Resource doesn't exist | Creates new resource | +| `update` | Resource exists | Updates existing resource | +| `recreate` | `recreateOnChange: true` | Deletes and recreates | +| `skip` | No changes needed | No operation performed | +| `dry_run` | Dry run mode | Simulated operation | + +### Phase 4: Post-Actions + +Executes post-processing actions like status reporting: + +
+Post-action example + +```yaml +post: + payloads: + - name: "statusPayload" + build: + status: + expression: | + resources.clusterController.status.readyReplicas > 0 + message: + value: "Deployment successful" + + postActions: + - name: "reportStatus" + apiCall: + method: "POST" + url: "{{ .apiBaseUrl }}/clusters/{{ .clusterId }}/status" + body: "{{ .statusPayload }}" +``` + +
+ +## Execution Results + +### ExecutionResult + +
+ExecutionResult structure + +```go +type ExecutionResult struct { + EventID string + Status ExecutionStatus // success, failed (process execution perspective) + Phase ExecutionPhase // where execution ended + Params map[string]interface{} + PreconditionResults []PreconditionResult + ResourceResults []ResourceResult + PostActionResults []PostActionResult + Error error // process execution error only + ErrorReason string // process execution error reason + ResourcesSkipped bool // business outcome: resources were skipped + SkipReason string // why resources were skipped +} +``` + +
+ +### Status Values + +| Status | Description | +|--------|-------------| +| `success` | Execution completed successfully (adapter process execution) | +| `failed` | Execution failed with process execution error (API timeout, K8s error, etc.) | + +**Note**: Precondition not met is a **successful execution** with `ResourcesSkipped=true`. This is a valid business outcome, not a process execution failure. + +## Error Handling + +### Execution Status vs Business Outcomes + +The executor distinguishes between **process execution status** and **business outcomes**: + +| Scenario | `Status` | `ResourcesSkipped` | `SkipReason` | Meaning | +|----------|----------|-------------------|--------------|---------| +| **Success** | `success` | `false` | `""` | Adapter executed successfully, all phases completed | +| **Precondition Not Met** | `success` | `true` | `"precondition..."` | Adapter executed successfully, business logic decided to skip resources | +| **Process Execution Error** | `failed` | `false` | `""` | API timeout, K8s error, parse error, etc. | + +### Precondition Not Met (Business Outcome) + +When preconditions are not satisfied, the executor: +1. Sets status to `success` (adapter executed successfully) +2. Sets `ResourcesSkipped = true` (business outcome) +3. Sets `SkipReason` with detailed explanation +4. Skips resource creation phase +5. Still executes post-actions (for status reporting) + +**This is a valid business outcome, not an error!** + +### Process Execution Errors + +Process execution errors are captured in `ExecutionResult` with: +- `Status`: `failed` +- `Error`: The actual error +- `ErrorReason`: Human-readable reason +- `Phase`: Phase where error occurred + +### Error and Status Reporting + +Post-actions always execute (even on failure) to allow comprehensive status reporting: + +
+Comprehensive status reporting example + +```yaml +post: + payloads: + - name: "statusPayload" + build: + status: + expression: | + adapter.executionStatus == "success" && !adapter.resourcesSkipped + reason: + expression: | + adapter.resourcesSkipped ? "PreconditionNotMet" : + (adapter.errorReason != "" ? adapter.errorReason : "Healthy") + message: + expression: | + adapter.skipReason != "" ? adapter.skipReason : + (adapter.errorMessage != "" ? adapter.errorMessage : "Success") + postActions: + - name: "reportStatus" + apiCall: + method: "POST" + url: "{{ .apiBaseUrl }}/clusters/{{ .clusterId }}/status" + body: "{{ .statusPayload }}" +``` + +
+ +### Available CEL Variables in Post-Actions + +| Variable | Type | Description | +|----------|------|-------------| +| `adapter.executionStatus` | string | `"success"` or `"failed"` (process execution status) | +| `adapter.resourcesSkipped` | bool | Resources were skipped (business outcome) | +| `adapter.skipReason` | string | Why resources were skipped | +| `adapter.errorReason` | string | Process execution error reason (if failed) | +| `adapter.errorMessage` | string | Process execution error message (if failed) | +| `adapter.executionError` | object | Detailed error information (if failed) | + +## Template Rendering + +All string values in the configuration support Go templates: + +```yaml +url: "{{ .apiBaseUrl }}/api/{{ .apiVersion }}/clusters/{{ .clusterId }}" +``` + +### Available Template Variables + +| Source | Example | +|--------|---------| +| Extracted params | `{{ .clusterId }}` | +| Captured fields | `{{ .clusterPhase }}` | +| Adapter metadata | `{{ .metadata.name }}` | +| Event metadata | `{{ .eventMetadata.id }}` | + +## Integration + +### With Broker Consumer + +
+Broker integration example + +```go +// Create executor +exec, _ := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sClient). + WithLogger(log). + Build() + +// Subscribe with executor handler +broker_consumer.Subscribe(ctx, subscriber, topic, exec.CreateHandler()) +``` + +
+ +### Environment Variables + +| Variable | Description | +|----------|-------------| +| `KUBECONFIG` | Path to kubeconfig (for local dev) | + +## Testing + +The executor can be tested with mock API and K8s clients: + +
+Testing example + +```go +// Create mock API client +mockAPIClient := &MockAPIClient{...} + +// Create mock K8s client (implements k8s_client.K8sClient interface) +mockK8s := &MockK8sClient{...} + +// Create executor with mock clients +exec, _ := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(mockAPIClient). + WithK8sClient(mockK8s). + WithLogger(testLogger). + Build() + +// Execute test event +result := exec.Execute(ctx, testEvent) +assert.Equal(t, executor.StatusSuccess, result.Status) +``` + +
+ diff --git a/internal/executor/executor.go b/internal/executor/executor.go new file mode 100644 index 0000000..b2a8ba8 --- /dev/null +++ b/internal/executor/executor.go @@ -0,0 +1,280 @@ +package executor + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// NewExecutor creates a new Executor with the given configuration +func NewExecutor(config *ExecutorConfig) (*Executor, error) { + if config == nil { + return nil, NewExecutorError(PhaseParamExtraction, "init", "executor config is required", nil) + } + if config.AdapterConfig == nil { + return nil, NewExecutorError(PhaseParamExtraction, "init", "adapter config is required", nil) + } + if config.APIClient == nil { + return nil, NewExecutorError(PhaseParamExtraction, "init", "API client is required", nil) + } + if config.Logger == nil { + return nil, NewExecutorError(PhaseParamExtraction, "init", "logger is required", nil) + } + + return &Executor{ + config: config, + precondExecutor: NewPreconditionExecutor(config.APIClient), + resourceExecutor: NewResourceExecutor(config.K8sClient), + postActionExecutor: NewPostActionExecutor(config.APIClient), + }, nil +} + +// Execute processes a CloudEvent according to the adapter configuration +// This is the main entry point for event processing +func (e *Executor) Execute(ctx context.Context, evt *event.Event) *ExecutionResult { + // ============================================================================ + // Setup + // ============================================================================ + if evt == nil { + return &ExecutionResult{ + Status: StatusFailed, + Error: NewExecutorError(PhaseParamExtraction, "init", "event 
is required", nil), + ErrorReason: "nil event received", + } + } + ctxWithEventID := context.WithValue(ctx, logger.EvtIDKey, evt.ID()) + eventLogger := logger.WithEventID(e.config.Logger, evt.ID()) + + // Parse event data at the boundary (decouples CloudEvent from parameter extraction) + eventData, err := parseEventData(evt) + if err != nil { + return &ExecutionResult{ + EventID: evt.ID(), + Status: StatusFailed, + Phase: PhaseParamExtraction, + Error: NewExecutorError(PhaseParamExtraction, "parse_event", "failed to parse event data", err), + ErrorReason: "event data parsing failed", + } + } + + execCtx := NewExecutionContext(ctxWithEventID, evt, eventData) + + // Initialize execution result + result := &ExecutionResult{ + EventID: evt.ID(), + Status: StatusSuccess, + Params: make(map[string]interface{}), + } + + eventLogger.Infof("Starting event execution: id=%s", evt.ID()) + + // ============================================================================ + // Phase 1: Parameter Extraction + // ============================================================================ + result.Phase = PhaseParamExtraction + if err := e.executeParamExtraction(execCtx); err != nil { + return e.finishWithError(result, execCtx, err, "parameter extraction failed", eventLogger) + } + result.Params = execCtx.Params + eventLogger.Infof("Parameter extraction completed: extracted %d params", len(execCtx.Params)) + + // ============================================================================ + // Phase 2: Preconditions + // ============================================================================ + result.Phase = PhasePreconditions + precondOutcome := e.precondExecutor.ExecuteAll(ctxWithEventID, e.config.AdapterConfig.Spec.Preconditions, execCtx, eventLogger) + result.PreconditionResults = precondOutcome.Results + + if precondOutcome.Error != nil { + // Process execution error: precondition evaluation failed + result.Status = StatusFailed + result.Error = precondOutcome.Error + 
result.ErrorReason = "precondition evaluation failed" + execCtx.SetError("PreconditionFailed", precondOutcome.Error.Error()) + eventLogger.Error(fmt.Sprintf("Precondition execution failed: %v", precondOutcome.Error)) + // Continue to post actions for error reporting + } else if !precondOutcome.AllMatched { + // Business outcome: precondition not satisfied + result.ResourcesSkipped = true + result.SkipReason = precondOutcome.NotMetReason + execCtx.SetSkipped("PreconditionNotMet", precondOutcome.NotMetReason) + eventLogger.Infof("Preconditions not met, resources will be skipped: %s", precondOutcome.NotMetReason) + } else { + // All preconditions matched + eventLogger.Infof("Preconditions completed: %d preconditions evaluated", len(precondOutcome.Results)) + } + + // ============================================================================ + // Phase 3: Resources (skip if preconditions not met or previous error) + // ============================================================================ + result.Phase = PhaseResources + if result.Status == StatusSuccess && !result.ResourcesSkipped { + resourceResults, err := e.resourceExecutor.ExecuteAll(ctxWithEventID, e.config.AdapterConfig.Spec.Resources, execCtx, eventLogger) + result.ResourceResults = resourceResults + + if err != nil { + result.Status = StatusFailed + result.Error = err + result.ErrorReason = "resource execution failed" + execCtx.SetError("ResourceFailed", err.Error()) + eventLogger.Error(fmt.Sprintf("Resource execution failed: %v", err)) + // Continue to post actions for error reporting + } else { + eventLogger.Infof("Resources completed: %d resources processed", len(resourceResults)) + } + } else if result.ResourcesSkipped { + eventLogger.Infof("Resources skipped: %s", result.SkipReason) + } else if result.Status == StatusFailed { + eventLogger.Infof("Resources skipped due to previous error") + } + + // ============================================================================ + // Phase 4: Post 
Actions (always execute for error reporting) + // ============================================================================ + result.Phase = PhasePostActions + postResults, err := e.postActionExecutor.ExecuteAll(ctxWithEventID, e.config.AdapterConfig.Spec.Post, execCtx, eventLogger) + result.PostActionResults = postResults + + if err != nil { + result.Status = StatusFailed + result.Error = err + result.ErrorReason = "post action execution failed" + eventLogger.Error(fmt.Sprintf("Post action execution failed: %v", err)) + } else { + eventLogger.Infof("Post actions completed: %d actions executed", len(postResults)) + } + + // ============================================================================ + // Finalize + // ============================================================================ + result.ExecutionContext = execCtx + + // Final logging + if result.Status == StatusSuccess { + if result.ResourcesSkipped { + eventLogger.Infof("Event execution completed successfully (resources skipped): id=%s reason=%s", + evt.ID(), result.SkipReason) + } else { + eventLogger.Infof("Event execution completed successfully: id=%s", + evt.ID()) + } + } else { + eventLogger.Error(fmt.Sprintf("Event execution failed: id=%s phase=%s reason=%s", + evt.ID(), result.Phase, result.ErrorReason)) + } + + return result +} + +// finishWithError is a helper to handle early termination with error +func (e *Executor) finishWithError(result *ExecutionResult, execCtx *ExecutionContext, err error, reason string, eventLogger logger.Logger) *ExecutionResult { + result.Status = StatusFailed + result.Error = err + result.ErrorReason = reason + result.ExecutionContext = execCtx + result.Params = execCtx.Params + eventLogger.Error(fmt.Sprintf("Event execution failed: id=%s phase=%s reason=%s", + result.EventID, result.Phase, result.ErrorReason)) + return result +} + +// executeParamExtraction extracts parameters from the event and environment +func (e *Executor) executeParamExtraction(execCtx 
*ExecutionContext) error {
+	// Extract configured parameters
+	if err := extractConfigParams(e.config.AdapterConfig, execCtx, e.config.K8sClient); err != nil {
+		return err
+	}
+
+	// Add metadata params
+	addMetadataParams(e.config.AdapterConfig, execCtx)
+
+	return nil
+}
+
+// CreateHandler creates an event handler function that can be used with the broker subscriber
+// This is a convenience method for integrating with the broker_consumer package
+func (e *Executor) CreateHandler() func(ctx context.Context, evt *event.Event) error {
+	return func(ctx context.Context, evt *event.Event) error {
+		result := e.Execute(ctx, evt)
+
+		if result.Status == StatusFailed {
+			// Don't NACK for param extraction failures (invalid events should not be retried)
+			if result.Phase == PhaseParamExtraction {
+				return nil // ACK the event
+			}
+			return result.Error
+		}
+
+		// Preconditions not met is not an error: it is StatusSuccess with ResourcesSkipped=true
+		return nil
+	}
+}
+
+
+// parseEventData parses the CloudEvent data payload into a map
+// This is done at the boundary to decouple CloudEvent from parameter extraction
+func parseEventData(evt *event.Event) (map[string]interface{}, error) {
+	if evt == nil {
+		return make(map[string]interface{}), nil
+	}
+
+	data := evt.Data()
+	if len(data) == 0 {
+		return make(map[string]interface{}), nil
+	}
+
+	var eventData map[string]interface{}
+	if err := json.Unmarshal(data, &eventData); err != nil {
+		return nil, fmt.Errorf("failed to parse event data as JSON: %w", err)
+	}
+
+	return eventData, nil
+}
+
+// ExecutorBuilder provides a fluent interface for building an Executor
+type ExecutorBuilder struct {
+	config *ExecutorConfig
+}
+
+// NewBuilder creates a new ExecutorBuilder
+func NewBuilder() *ExecutorBuilder {
+	return &ExecutorBuilder{
+		config: &ExecutorConfig{},
+	}
+}
+
+// WithAdapterConfig sets the adapter configuration
+func (b *ExecutorBuilder) WithAdapterConfig(config *config_loader.AdapterConfig) *ExecutorBuilder {
+	
b.config.AdapterConfig = config + return b +} + +// WithAPIClient sets the HyperFleet API client +func (b *ExecutorBuilder) WithAPIClient(client hyperfleet_api.Client) *ExecutorBuilder { + b.config.APIClient = client + return b +} + +// WithK8sClient sets the Kubernetes client +func (b *ExecutorBuilder) WithK8sClient(client k8s_client.K8sClient) *ExecutorBuilder { + b.config.K8sClient = client + return b +} + +// WithLogger sets the logger +func (b *ExecutorBuilder) WithLogger(log logger.Logger) *ExecutorBuilder { + b.config.Logger = log + return b +} + +// Build creates the Executor +func (b *ExecutorBuilder) Build() (*Executor, error) { + return NewExecutor(b.config) +} + diff --git a/internal/executor/executor_test.go b/internal/executor/executor_test.go new file mode 100644 index 0000000..7c829e8 --- /dev/null +++ b/internal/executor/executor_test.go @@ -0,0 +1,830 @@ +package executor + +import ( + "context" + "encoding/json" + "testing" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// newMockAPIClient creates a new mock API client for convenience +func newMockAPIClient() *hyperfleet_api.MockClient { + return hyperfleet_api.NewMockClient() +} + +// mockLogger implements logger.Logger for testing +// It can optionally capture warnings for assertions +type mockLogger struct { + warnings []string +} + +func (m *mockLogger) V(level int32) logger.Logger { return m } +func (m *mockLogger) Infof(format string, args ...interface{}) {} +func (m *mockLogger) Warningf(format string, args ...interface{}) {} +func (m 
*mockLogger) Errorf(format string, args ...interface{}) {} +func (m *mockLogger) Extra(key string, value interface{}) logger.Logger { return m } +func (m *mockLogger) Info(message string) {} +func (m *mockLogger) Warning(message string) { m.warnings = append(m.warnings, message) } +func (m *mockLogger) Error(message string) {} +func (m *mockLogger) Fatal(message string) {} + +func TestNewExecutor(t *testing.T) { + tests := []struct { + name string + config *ExecutorConfig + expectError bool + }{ + { + name: "nil config", + config: nil, + expectError: true, + }, + { + name: "missing adapter config", + config: &ExecutorConfig{ + APIClient: newMockAPIClient(), + Logger: &mockLogger{}, + }, + expectError: true, + }, + { + name: "missing API client", + config: &ExecutorConfig{ + AdapterConfig: &config_loader.AdapterConfig{}, + Logger: &mockLogger{}, + }, + expectError: true, + }, + { + name: "missing logger", + config: &ExecutorConfig{ + AdapterConfig: &config_loader.AdapterConfig{}, + APIClient: newMockAPIClient(), + }, + expectError: true, + }, + { + name: "valid config", + config: &ExecutorConfig{ + AdapterConfig: &config_loader.AdapterConfig{}, + APIClient: newMockAPIClient(), + Logger: &mockLogger{}, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := NewExecutor(tt.config) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestExecutorBuilder(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "test-adapter", + Namespace: "test-ns", + }, + } + + exec, err := NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(newMockAPIClient()). + WithK8sClient(k8s_client.NewMockK8sClient()). + WithLogger(&mockLogger{}). 
+ Build() + + require.NoError(t, err) + require.NotNil(t, exec) +} + +func TestExecutionContext(t *testing.T) { + ctx := context.Background() + evt := event.New() + evt.SetID("test-123") + + execCtx := NewExecutionContext(ctx, &evt, make(map[string]interface{})) + + assert.Equal(t, "test-123", execCtx.Event.ID()) + assert.Empty(t, execCtx.Params) + assert.Empty(t, execCtx.Resources) + assert.Equal(t, string(StatusSuccess), execCtx.Adapter.ExecutionStatus) +} + +func TestExecutionContext_SetError(t *testing.T) { + ctx := context.Background() + evt := event.New() + + execCtx := NewExecutionContext(ctx, &evt, make(map[string]interface{})) + execCtx.SetError("TestReason", "Test message") + + assert.Equal(t, string(StatusFailed), execCtx.Adapter.ExecutionStatus) + assert.Equal(t, "TestReason", execCtx.Adapter.ErrorReason) + assert.Equal(t, "Test message", execCtx.Adapter.ErrorMessage) +} + +func TestExecutionContext_EvaluationTracking(t *testing.T) { + ctx := context.Background() + evt := event.New() + evt.SetID("test-123") + + execCtx := NewExecutionContext(ctx, &evt, make(map[string]interface{})) + + // Verify evaluations are empty initially + assert.Empty(t, execCtx.Evaluations, "expected empty evaluations initially") + + // Add a CEL evaluation + execCtx.AddCELEvaluation(PhasePreconditions, "check-status", "status == 'active'", true) + + require.Len(t, execCtx.Evaluations, 1, "evaluation") + + eval := execCtx.Evaluations[0] + assert.Equal(t, PhasePreconditions, eval.Phase) + assert.Equal(t, "check-status", eval.Name) + assert.Equal(t, EvaluationTypeCEL, eval.EvaluationType) + assert.Equal(t, "status == 'active'", eval.Expression) + assert.True(t, eval.Matched) + + // Add a conditions evaluation with field results (using criteria.EvaluationResult) + fieldResults := map[string]criteria.EvaluationResult{ + "status.phase": { + Field: "status.phase", + Operator: criteria.OperatorEquals, + ExpectedValue: "Running", + FieldValue: "Running", + Matched: true, + }, + 
"replicas": { + Field: "replicas", + Operator: criteria.OperatorGreaterThan, + ExpectedValue: 0, + FieldValue: 3, + Matched: true, + }, + } + execCtx.AddConditionsEvaluation(PhasePreconditions, "check-replicas", true, fieldResults) + + require.Len(t, execCtx.Evaluations, 2, "evaluations") + + condEval := execCtx.Evaluations[1] + assert.Equal(t, EvaluationTypeConditions, condEval.EvaluationType) + assert.Len(t, condEval.FieldResults, 2) + + // Verify lookup by field name works + assert.Contains(t, condEval.FieldResults, "status.phase") + assert.Equal(t, "Running", condEval.FieldResults["status.phase"].FieldValue) + + assert.Contains(t, condEval.FieldResults, "replicas") + assert.Equal(t, 3, condEval.FieldResults["replicas"].FieldValue) +} + +func TestExecutionContext_GetEvaluationsByPhase(t *testing.T) { + ctx := context.Background() + evt := event.New() + + execCtx := NewExecutionContext(ctx, &evt, make(map[string]interface{})) + + // Add evaluations in different phases + execCtx.AddCELEvaluation(PhasePreconditions, "precond-1", "true", true) + execCtx.AddCELEvaluation(PhasePreconditions, "precond-2", "false", false) + execCtx.AddCELEvaluation(PhasePostActions, "post-1", "true", true) + + // Get preconditions evaluations + precondEvals := execCtx.GetEvaluationsByPhase(PhasePreconditions) + require.Len(t, precondEvals, 2, "precondition evaluations") + + // Get post actions evaluations + postEvals := execCtx.GetEvaluationsByPhase(PhasePostActions) + require.Len(t, postEvals, 1, "post action evaluation") + + // Get resources evaluations (none) + resourceEvals := execCtx.GetEvaluationsByPhase(PhaseResources) + require.Len(t, resourceEvals, 0, "resource evaluations") +} + +func TestExecutionContext_GetFailedEvaluations(t *testing.T) { + ctx := context.Background() + evt := event.New() + + execCtx := NewExecutionContext(ctx, &evt, make(map[string]interface{})) + + // Add mixed evaluations + execCtx.AddCELEvaluation(PhasePreconditions, "passed-1", "true", true) + 
execCtx.AddCELEvaluation(PhasePreconditions, "failed-1", "false", false) + execCtx.AddCELEvaluation(PhasePreconditions, "passed-2", "true", true) + execCtx.AddCELEvaluation(PhasePostActions, "failed-2", "false", false) + + failedEvals := execCtx.GetFailedEvaluations() + require.Len(t, failedEvals, 2, "failed evaluations") + + // Verify the failed ones are correct + names := make(map[string]bool) + for _, eval := range failedEvals { + names[eval.Name] = true + } + assert.True(t, names["failed-1"], "failed-1") + assert.True(t, names["failed-2"], "failed-2") +} + +func TestExecutorError(t *testing.T) { + err := NewExecutorError(PhasePreconditions, "test-step", "test message", nil) + + expected := "[preconditions] test-step: test message" + if err.Error() != expected { + t.Errorf("expected '%s', got '%s'", expected, err.Error()) + } + + // With wrapped error + wrappedErr := NewExecutorError(PhaseResources, "create", "failed to create", context.Canceled) + assert.Equal(t, context.Canceled, wrappedErr.Unwrap()) +} + +func TestExecute_ParamExtraction(t *testing.T) { + // Set up environment variable for test + t.Setenv("TEST_VAR", "test-value") + + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "test-adapter", + Namespace: "test-ns", + }, + Spec: config_loader.AdapterConfigSpec{ + Params: []config_loader.Parameter{ + { + Name: "testParam", + Source: "env.TEST_VAR", + Required: true, + }, + { + Name: "eventParam", + Source: "event.cluster_id", + Required: true, + }, + }, + }, + } + + exec, err := NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(newMockAPIClient()). + WithK8sClient(k8s_client.NewMockK8sClient()). + WithLogger(&mockLogger{}). 
+ Build() + + if err != nil { + t.Fatalf("unexpected error creating executor: %v", err) + } + + // Create event with data + evt := event.New() + evt.SetID("test-event-123") + eventData := map[string]interface{}{ + "cluster_id": "cluster-456", + } + eventDataBytes, _ := json.Marshal(eventData) + _ = evt.SetData(event.ApplicationJSON, eventDataBytes) + + // Execute + result := exec.Execute(context.Background(), &evt) + + // Check result + if result.EventID != "test-event-123" { + t.Errorf("expected event ID 'test-event-123', got '%s'", result.EventID) + } + + // Check extracted params + if result.Params["testParam"] != "test-value" { + t.Errorf("expected testParam to be 'test-value', got '%v'", result.Params["testParam"]) + } + + if result.Params["eventParam"] != "cluster-456" { + t.Errorf("expected eventParam to be 'cluster-456', got '%v'", result.Params["eventParam"]) + } +} + +func TestParamExtractor(t *testing.T) { + t.Setenv("TEST_ENV", "env-value") + + evt := event.New() + eventData := map[string]interface{}{ + "cluster_id": "test-cluster", + "nested": map[string]interface{}{ + "value": "nested-value", + }, + } + eventDataBytes, _ := json.Marshal(eventData) + _ = evt.SetData(event.ApplicationJSON, eventDataBytes) + + tests := []struct { + name string + params []config_loader.Parameter + expectKey string + expectValue interface{} + expectError bool + }{ + { + name: "extract from env", + params: []config_loader.Parameter{ + {Name: "envVar", Source: "env.TEST_ENV"}, + }, + expectKey: "envVar", + expectValue: "env-value", + }, + { + name: "extract from event", + params: []config_loader.Parameter{ + {Name: "clusterId", Source: "event.cluster_id"}, + }, + expectKey: "clusterId", + expectValue: "test-cluster", + }, + { + name: "extract nested from event", + params: []config_loader.Parameter{ + {Name: "nestedVal", Source: "event.nested.value"}, + }, + expectKey: "nestedVal", + expectValue: "nested-value", + }, + { + name: "use default for missing optional", + params: 
[]config_loader.Parameter{ + {Name: "optional", Source: "env.MISSING", Default: "default-val"}, + }, + expectKey: "optional", + expectValue: "default-val", + }, + { + name: "fail on missing required", + params: []config_loader.Parameter{ + {Name: "required", Source: "env.MISSING", Required: true}, + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create fresh context for each test + execCtx := NewExecutionContext(context.Background(), &evt, eventData) + + // Create config with test params + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "test", + Namespace: "default", + }, + Spec: config_loader.AdapterConfigSpec{ + Params: tt.params, + }, + } + + // Extract params using pure function + err := extractConfigParams(config, execCtx, nil) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + + if tt.expectKey != "" { + if execCtx.Params[tt.expectKey] != tt.expectValue { + t.Errorf("expected %s=%v, got %v", tt.expectKey, tt.expectValue, execCtx.Params[tt.expectKey]) + } + } + }) + } +} + +func TestRenderTemplate(t *testing.T) { + tests := []struct { + name string + template string + data map[string]interface{} + expected string + expectError bool + }{ + { + name: "simple variable", + template: "Hello {{ .name }}!", + data: map[string]interface{}{"name": "World"}, + expected: "Hello World!", + }, + { + name: "no template", + template: "plain text", + data: map[string]interface{}{}, + expected: "plain text", + }, + { + name: "nested variable", + template: "{{ .cluster.id }}", + data: map[string]interface{}{ + "cluster": map[string]interface{}{"id": "test-123"}, + }, + expected: "test-123", + }, + { + name: "missing variable", + template: "{{ .missing }}", + data: map[string]interface{}{}, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := renderTemplate(tt.template, tt.data) + + 
if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + + if result != tt.expected { + t.Errorf("expected '%s', got '%s'", tt.expected, result) + } + }) + } +} + +// TestSequentialExecution_Preconditions tests that preconditions stop on first failure +func TestSequentialExecution_Preconditions(t *testing.T) { + tests := []struct { + name string + preconditions []config_loader.Precondition + expectedResults int // number of results before stopping + expectError bool + expectNotMet bool + expectedLastName string + }{ + { + name: "all pass - all executed", + preconditions: []config_loader.Precondition{ + {Name: "precond1", Expression: "true"}, + {Name: "precond2", Expression: "true"}, + {Name: "precond3", Expression: "true"}, + }, + expectedResults: 3, + expectError: false, + expectNotMet: false, + expectedLastName: "precond3", + }, + { + name: "first fails - stops immediately", + preconditions: []config_loader.Precondition{ + {Name: "precond1", Expression: "false"}, + {Name: "precond2", Expression: "true"}, + {Name: "precond3", Expression: "true"}, + }, + expectedResults: 1, + expectError: false, + expectNotMet: true, + expectedLastName: "precond1", + }, + { + name: "second fails - first executes, stops at second", + preconditions: []config_loader.Precondition{ + {Name: "precond1", Expression: "true"}, + {Name: "precond2", Expression: "false"}, + {Name: "precond3", Expression: "true"}, + }, + expectedResults: 2, + expectError: false, + expectNotMet: true, + expectedLastName: "precond2", + }, + { + name: "third fails - first two execute, stops at third", + preconditions: []config_loader.Precondition{ + {Name: "precond1", Expression: "true"}, + {Name: "precond2", Expression: "true"}, + {Name: "precond3", Expression: "false"}, + }, + expectedResults: 3, + expectError: false, + expectNotMet: true, + expectedLastName: "precond3", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := 
&config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "test-adapter", + Namespace: "test-ns", + }, + Spec: config_loader.AdapterConfigSpec{ + Preconditions: tt.preconditions, + }, + } + + exec, err := NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(newMockAPIClient()). + WithK8sClient(k8s_client.NewMockK8sClient()). + WithLogger(&mockLogger{}). + Build() + + if err != nil { + t.Fatalf("unexpected error creating executor: %v", err) + } + + evt := event.New() + evt.SetID("test-event-seq") + + result := exec.Execute(context.Background(), &evt) + + // Verify number of precondition results + if len(result.PreconditionResults) != tt.expectedResults { + t.Errorf("expected %d precondition results, got %d", + tt.expectedResults, len(result.PreconditionResults)) + } + + // Verify last executed precondition name + if len(result.PreconditionResults) > 0 { + lastResult := result.PreconditionResults[len(result.PreconditionResults)-1] + if lastResult.Name != tt.expectedLastName { + t.Errorf("expected last precondition to be '%s', got '%s'", + tt.expectedLastName, lastResult.Name) + } + } + + // Verify error/not met status + if tt.expectNotMet { + // Precondition not met is a successful execution, just with resources skipped + assert.Equal(t, StatusSuccess, result.Status, "expected status Success (precondition not met is valid outcome)") + assert.True(t, result.ResourcesSkipped, "ResourcesSkipped") + assert.NotEmpty(t, result.SkipReason, "expected SkipReason to be set") + } + + if !tt.expectNotMet && !tt.expectError { + assert.Equal(t, StatusSuccess, result.Status, "expected status Success") + } + }) + } +} + +// TestSequentialExecution_Resources tests that resources stop on first failure +func TestSequentialExecution_Resources(t *testing.T) { + // Note: This test uses dry-run mode and focuses on the sequential logic + // without requiring a real K8s cluster. Resource sequential execution is better + // tested in integration tests with real K8s API. 
+ + tests := []struct { + name string + resources []config_loader.Resource + expectedResults int + expectFailure bool + }{ + { + name: "single resource with valid manifest", + resources: []config_loader.Resource{ + { + Name: "resource1", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-cm", + }, + }, + }, + }, + expectedResults: 1, + expectFailure: false, + }, + { + name: "first resource invalid - stops immediately", + resources: []config_loader.Resource{ + {Name: "resource1", Manifest: map[string]interface{}{"kind": "ConfigMap"}}, // Missing apiVersion + { + Name: "resource2", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-cm2", + }, + }, + }, + }, + expectedResults: 1, // Stops at first failure + expectFailure: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "test-adapter", + Namespace: "test-ns", + }, + Spec: config_loader.AdapterConfigSpec{ + Resources: tt.resources, + }, + } + + exec, err := NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(newMockAPIClient()). + WithK8sClient(k8s_client.NewMockK8sClient()). + WithLogger(&mockLogger{}). 
+ Build() + + if err != nil { + t.Fatalf("unexpected error creating executor: %v", err) + } + + evt := event.New() + evt.SetID("test-event-resources") + + result := exec.Execute(context.Background(), &evt) + + // Verify sequential stop-on-failure: number of results should match expected + if len(result.ResourceResults) != tt.expectedResults { + t.Errorf("expected %d resource results, got %d (sequential execution should stop at failure)", + tt.expectedResults, len(result.ResourceResults)) + } + + // Verify failure status + if tt.expectFailure { + if result.Status == StatusSuccess { + t.Error("expected execution to fail but got success") + } + } + }) + } +} + +// TestSequentialExecution_PostActions tests that post actions stop on first failure +func TestSequentialExecution_PostActions(t *testing.T) { + tests := []struct { + name string + postActions []config_loader.PostAction + mockResponse *hyperfleet_api.Response + mockError error + expectedResults int + expectError bool + }{ + { + name: "all log actions succeed", + postActions: []config_loader.PostAction{ + {Name: "log1", Log: &config_loader.LogAction{Message: "msg1"}}, + {Name: "log2", Log: &config_loader.LogAction{Message: "msg2"}}, + {Name: "log3", Log: &config_loader.LogAction{Message: "msg3"}}, + }, + expectedResults: 3, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + postConfig := &config_loader.PostConfig{ + PostActions: tt.postActions, + } + + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "test-adapter", + Namespace: "test-ns", + }, + Spec: config_loader.AdapterConfigSpec{ + Post: postConfig, + }, + } + + mockClient := newMockAPIClient() + mockClient.GetResponse = tt.mockResponse + mockClient.GetError = tt.mockError + mockClient.PostResponse = tt.mockResponse + mockClient.PostError = tt.mockError + + exec, err := NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(mockClient). 
+ WithK8sClient(k8s_client.NewMockK8sClient()). + WithLogger(&mockLogger{}). + Build() + + if err != nil { + t.Fatalf("unexpected error creating executor: %v", err) + } + + evt := event.New() + evt.SetID("test-event-post") + + result := exec.Execute(context.Background(), &evt) + + // Verify number of post action results + if len(result.PostActionResults) != tt.expectedResults { + t.Errorf("expected %d post action results, got %d", + tt.expectedResults, len(result.PostActionResults)) + } + + // Verify error expectation + if tt.expectError && result.Error == nil { + t.Error("expected error but got nil") + } + }) + } +} + +// TestSequentialExecution_SkipReasonCapture tests that SkipReason captures which precondition wasn't met +func TestSequentialExecution_SkipReasonCapture(t *testing.T) { + tests := []struct { + name string + preconditions []config_loader.Precondition + expectedStatus ExecutionStatus + expectSkipped bool + }{ + { + name: "first precondition not met", + preconditions: []config_loader.Precondition{ + {Name: "check1", Expression: "false"}, + {Name: "check2", Expression: "true"}, + {Name: "check3", Expression: "true"}, + }, + expectedStatus: StatusSuccess, // Successful execution, just resources skipped + expectSkipped: true, + }, + { + name: "second precondition not met", + preconditions: []config_loader.Precondition{ + {Name: "check1", Expression: "true"}, + {Name: "check2", Expression: "false"}, + {Name: "check3", Expression: "true"}, + }, + expectedStatus: StatusSuccess, // Successful execution, just resources skipped + expectSkipped: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + config := &config_loader.AdapterConfig{ + Metadata: config_loader.Metadata{ + Name: "test-adapter", + Namespace: "test-ns", + }, + Spec: config_loader.AdapterConfigSpec{ + Preconditions: tt.preconditions, + }, + } + + exec, err := NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(newMockAPIClient()). 
+ WithK8sClient(k8s_client.NewMockK8sClient()). + WithLogger(&mockLogger{}). + Build() + + if err != nil { + t.Fatalf("unexpected error creating executor: %v", err) + } + + evt := event.New() + evt.SetID("test-event-skip") + + result := exec.Execute(context.Background(), &evt) + + // Verify execution status is success (adapter executed successfully) + if result.Status != tt.expectedStatus { + t.Errorf("expected status %s, got %s", tt.expectedStatus, result.Status) + } + + // Verify resources were skipped + if tt.expectSkipped { + assert.True(t, result.ResourcesSkipped, "ResourcesSkipped") + assert.NotEmpty(t, result.SkipReason, "expected SkipReason to be set") + // Verify execution context captures skip information + if result.ExecutionContext != nil { + assert.True(t, result.ExecutionContext.Adapter.ResourcesSkipped, "adapter.ResourcesSkipped") + } + } + }) + } +} + diff --git a/internal/executor/param_extractor.go b/internal/executor/param_extractor.go new file mode 100644 index 0000000..3f4b8f2 --- /dev/null +++ b/internal/executor/param_extractor.go @@ -0,0 +1,151 @@ +package executor + +import ( + "context" + "fmt" + "os" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" +) + +// extractConfigParams extracts all configured parameters and populates execCtx.Params +// This is a pure function that directly modifies execCtx for simplicity +func extractConfigParams(config *config_loader.AdapterConfig, execCtx *ExecutionContext, k8sClient k8s_client.K8sClient) error { + for _, param := range config.Spec.Params { + value, err := extractParam(execCtx.Ctx, param, execCtx.EventData, k8sClient) + if err != nil { + if param.Required { + return NewExecutorError(PhaseParamExtraction, param.Name, + fmt.Sprintf("failed to extract required parameter: %s", param.Source), err) + } + // Use default for non-required params + if param.Default != nil { + 
execCtx.Params[param.Name] = param.Default + } + continue + } + + // Apply default if value is nil + if value == nil && param.Default != nil { + value = param.Default + } + + if value != nil { + execCtx.Params[param.Name] = value + } + } + + return nil +} + +// extractParam extracts a single parameter based on its source +func extractParam(ctx context.Context, param config_loader.Parameter, eventData map[string]interface{}, k8sClient k8s_client.K8sClient) (interface{}, error) { + source := param.Source + + // Handle different source types + switch { + case strings.HasPrefix(source, "env."): + return extractFromEnv(source[4:]) + case strings.HasPrefix(source, "event."): + return extractFromEvent(source[6:], eventData) + case strings.HasPrefix(source, "secret."): + return extractFromSecret(ctx, source[7:], k8sClient) + case strings.HasPrefix(source, "configmap."): + return extractFromConfigMap(ctx, source[10:], k8sClient) + case source == "": + // No source specified, return default or nil + return param.Default, nil + default: + // Try to extract from event data directly + return extractFromEvent(source, eventData) + } +} + +// extractFromEnv extracts a value from environment variables +func extractFromEnv(envVar string) (interface{}, error) { + value, exists := os.LookupEnv(envVar) + if !exists { + return nil, fmt.Errorf("environment variable %s not set", envVar) + } + return value, nil +} + +// extractFromEvent extracts a value from event data using dot notation +func extractFromEvent(path string, eventData map[string]interface{}) (interface{}, error) { + parts := strings.Split(path, ".") + var current interface{} = eventData + + for i, part := range parts { + switch v := current.(type) { + case map[string]interface{}: + val, ok := v[part] + if !ok { + return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) + } + current = val + case map[interface{}]interface{}: + val, ok := v[part] + if !ok { + return nil, 
fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) + } + current = val + default: + return nil, fmt.Errorf("cannot access field '%s': parent is not a map (got %T)", part, current) + } + } + + return current, nil +} + +// extractFromSecret extracts a value from a Kubernetes Secret +// Format: secret... (namespace is required) +func extractFromSecret(ctx context.Context, path string, k8sClient k8s_client.K8sClient) (interface{}, error) { + if k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured, cannot extract from secret") + } + + value, err := k8sClient.ExtractFromSecret(ctx, path) + if err != nil { + return nil, err + } + + return value, nil +} + +// extractFromConfigMap extracts a value from a Kubernetes ConfigMap +// Format: configmap... (namespace is required) +func extractFromConfigMap(ctx context.Context, path string, k8sClient k8s_client.K8sClient) (interface{}, error) { + if k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured, cannot extract from configmap") + } + + value, err := k8sClient.ExtractFromConfigMap(ctx, path) + if err != nil { + return nil, err + } + + return value, nil +} + +// addMetadataParams adds adapter and event metadata to execCtx.Params +func addMetadataParams(config *config_loader.AdapterConfig, execCtx *ExecutionContext) { + // Add metadata from adapter config + execCtx.Params["metadata"] = map[string]interface{}{ + "name": config.Metadata.Name, + "namespace": config.Metadata.Namespace, + "labels": config.Metadata.Labels, + } + + // Add event metadata if available + if execCtx.Event != nil { + execCtx.Params["eventMetadata"] = map[string]interface{}{ + "id": execCtx.Event.ID(), + "type": execCtx.Event.Type(), + "source": execCtx.Event.Source(), + "time": execCtx.Event.Time().String(), + } + } +} + diff --git a/internal/executor/post_action_executor.go b/internal/executor/post_action_executor.go new file mode 100644 index 0000000..e6f4cd1 --- 
/dev/null +++ b/internal/executor/post_action_executor.go @@ -0,0 +1,258 @@ +package executor + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// PostActionExecutor executes post-processing actions +type PostActionExecutor struct { + apiClient hyperfleet_api.Client +} + +// NewPostActionExecutor creates a new post-action executor +func NewPostActionExecutor(apiClient hyperfleet_api.Client) *PostActionExecutor { + return &PostActionExecutor{ + apiClient: apiClient, + } +} + +// ExecuteAll executes all post-processing actions +// First builds payloads from post.payloads, then executes post.postActions +func (pae *PostActionExecutor) ExecuteAll(ctx context.Context, postConfig *config_loader.PostConfig, execCtx *ExecutionContext, log logger.Logger) ([]PostActionResult, error) { + if postConfig == nil { + return []PostActionResult{}, nil + } + + // Step 1: Build post payloads (like clusterStatusPayload) + if len(postConfig.Payloads) > 0 { + if err := buildPostPayloads(postConfig.Payloads, execCtx, log); err != nil { + execCtx.Adapter.ExecutionError = &ExecutionError{ + Phase: string(PhasePostActions), + Step: "build_payloads", + Message: err.Error(), + } + return []PostActionResult{}, NewExecutorError(PhasePostActions, "build_payloads", "failed to build post payloads", err) + } + } + + // Step 2: Execute post actions (sequential - stop on first failure) + results := make([]PostActionResult, 0, len(postConfig.PostActions)) + for _, action := range postConfig.PostActions { + result, err := pae.executePostAction(ctx, action, execCtx, log) + results = append(results, result) + + if err != nil { + log.Error(fmt.Sprintf("Post action '%s' failed: %v", action.Name, 
err)) + + // Set ExecutionError for failed post action + execCtx.Adapter.ExecutionError = &ExecutionError{ + Phase: string(PhasePostActions), + Step: action.Name, + Message: err.Error(), + } + + // Stop execution - don't run remaining post actions + return results, err + } + } + + return results, nil +} + +// buildPostPayloads builds all post payloads and stores them in execCtx.Params +// Payloads are complex structures built from CEL expressions and templates +func buildPostPayloads(payloads []config_loader.Payload, execCtx *ExecutionContext, log logger.Logger) error { + // Create evaluation context with all params for CEL expressions + evalCtx := criteria.NewEvaluationContext() + evalCtx.SetVariablesFromMap(execCtx.Params) + // Add adapter metadata for CEL expressions (convert to map) + evalCtx.Set("adapter", adapterMetadataToMap(&execCtx.Adapter)) + + evaluator := criteria.NewEvaluator(evalCtx, log) + + for _, payload := range payloads { + // Determine build source (inline Build or BuildRef) + var buildDef any + if payload.Build != nil { + buildDef = payload.Build + } else if payload.BuildRefContent != nil { + buildDef = payload.BuildRefContent + } else { + return fmt.Errorf("payload '%s' has neither Build nor BuildRefContent", payload.Name) + } + + // Build the payload + builtPayload, err := buildPayload(buildDef, evaluator, execCtx.Params, log) + if err != nil { + return fmt.Errorf("failed to build payload '%s': %w", payload.Name, err) + } + + // Convert to JSON for template rendering (templates will render maps as "map[...]" otherwise) + jsonBytes, err := json.Marshal(builtPayload) + if err != nil { + return fmt.Errorf("failed to marshal payload '%s' to JSON: %w", payload.Name, err) + } + + // Store as JSON string in params for use in post action templates + execCtx.Params[payload.Name] = string(jsonBytes) + } + + return nil +} + +// buildPayload builds a payload from a build definition +// The build definition can contain expressions that need to be 
evaluated +func buildPayload(build any, evaluator *criteria.Evaluator, params map[string]any, log logger.Logger) (any, error) { + switch v := build.(type) { + case map[string]any: + return buildMapPayload(v, evaluator, params, log) + case map[any]any: + converted := convertToStringKeyMap(v) + return buildMapPayload(converted, evaluator, params, log) + default: + return build, nil + } +} + +// buildMapPayload builds a map payload, evaluating expressions as needed +func buildMapPayload(m map[string]any, evaluator *criteria.Evaluator, params map[string]any, log logger.Logger) (map[string]any, error) { + result := make(map[string]any) + + for k, v := range m { + // Render the key + renderedKey, err := renderTemplate(k, params) + if err != nil { + return nil, fmt.Errorf("failed to render key '%s': %w", k, err) + } + + // Process the value + processedValue, err := processValue(v, evaluator, params, log) + if err != nil { + return nil, fmt.Errorf("failed to process value for key '%s': %w", k, err) + } + + result[renderedKey] = processedValue + } + + return result, nil +} + +// processValue processes a value, evaluating expressions as needed +func processValue(v any, evaluator *criteria.Evaluator, params map[string]any, log logger.Logger) (any, error) { + switch val := v.(type) { + case map[string]any: + // Check if this is an expression definition + if expr, ok := val["expression"].(string); ok { + // Evaluate CEL expression + result, err := evaluator.EvaluateCEL(strings.TrimSpace(expr)) + if err != nil { + log.Error(fmt.Sprintf("failed to evaluate CEL expression '%s': %v", expr, err)) + return nil, err + } + if result.HasError() { + // result Error recorded the eval error reason, like key not exists, etc. + // just log the error reason for debugging, it's expected when resource is not created yet. 
+ // like resources.cluster.status.phase == "Running", when status not exist in cluster object yet + // there will be error "no such key: cluster.status" + // log it as debug info, not an error for further processing + log.V(2).Infof("CEL expression evaluation failed: %v", result.ErrorReason) + } + return result.Value, nil + } + + // Check if this is a simple value definition + if value, ok := val["value"]; ok { + // Render template if it's a string + if strVal, ok := value.(string); ok { + return renderTemplate(strVal, params) + } + return value, nil + } + + // Recursively process nested maps + return buildMapPayload(val, evaluator, params, log) + + case map[any]any: + converted := convertToStringKeyMap(val) + return processValue(converted, evaluator, params, log) + + case []any: + result := make([]any, len(val)) + for i, item := range val { + processed, err := processValue(item, evaluator, params, log) + if err != nil { + return nil, err + } + result[i] = processed + } + return result, nil + + case string: + return renderTemplate(val, params) + + default: + return v, nil + } +} + +// executePostAction executes a single post-action +func (pae *PostActionExecutor) executePostAction(ctx context.Context, action config_loader.PostAction, execCtx *ExecutionContext, log logger.Logger) (PostActionResult, error) { + result := PostActionResult{ + Name: action.Name, + Status: StatusSuccess, + } + + log.Infof("Executing post action: %s", action.Name) + + // Execute log action if configured + if action.Log != nil { + ExecuteLogAction(action.Log, execCtx, log) + } + + // Execute API call if configured + if action.APICall != nil { + if err := pae.executeAPICall(ctx, action.APICall, execCtx, &result, log); err != nil { + return result, err + } + } + + log.Infof("Post action '%s' completed", action.Name) + + return result, nil +} + +// executeAPICall executes an API call and populates the result with response details +func (pae *PostActionExecutor) executeAPICall(ctx 
context.Context, apiCall *config_loader.APICall, execCtx *ExecutionContext, result *PostActionResult, log logger.Logger) error { + resp, url, err := ExecuteAPICall(ctx, apiCall, execCtx, pae.apiClient, log) + result.APICallMade = true + + // Capture response details if available (even if err != nil) + if resp != nil { + result.APIResponse = resp.Body + result.HTTPStatus = resp.StatusCode + } + + // Validate response - returns APIError with full metadata if validation fails + if validationErr := ValidateAPIResponse(resp, err, apiCall.Method, url); validationErr != nil { + result.Status = StatusFailed + result.Error = validationErr + + // Determine error context + errorContext := "API call failed" + if err == nil && resp != nil && !resp.IsSuccess() { + errorContext = "API call returned non-success status" + } + + return NewExecutorError(PhasePostActions, result.Name, errorContext, validationErr) + } + + return nil +} diff --git a/internal/executor/post_action_executor_test.go b/internal/executor/post_action_executor_test.go new file mode 100644 index 0000000..536e781 --- /dev/null +++ b/internal/executor/post_action_executor_test.go @@ -0,0 +1,665 @@ +package executor + +import ( + "context" + "net/http" + "testing" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// testLogger implements logger.Logger for testing +type testLogger struct{} + +func (l *testLogger) V(level int32) logger.Logger { return l } +func (l *testLogger) Infof(format string, args ...interface{}) {} +func (l *testLogger) Warningf(format string, args ...interface{}) {} +func (l *testLogger) Errorf(format string, args 
...interface{}) {} +func (l *testLogger) Extra(key string, value interface{}) logger.Logger { return l } +func (l *testLogger) Info(message string) {} +func (l *testLogger) Warning(message string) {} +func (l *testLogger) Error(message string) {} +func (l *testLogger) Fatal(message string) {} + +func TestBuildPayload(t *testing.T) { + log := &testLogger{} + + tests := []struct { + name string + build interface{} + params map[string]interface{} + expected interface{} + expectError bool + }{ + { + name: "nil build returns nil", + build: nil, + params: map[string]interface{}{}, + expected: nil, + }, + { + name: "string value passthrough", + build: "simple string", + params: map[string]interface{}{}, + expected: "simple string", + }, + { + name: "int value passthrough", + build: 42, + params: map[string]interface{}{}, + expected: 42, + }, + { + name: "simple map", + build: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "map with template key", + build: map[string]interface{}{ + "{{ .keyName }}": "value", + }, + params: map[string]interface{}{ + "keyName": "dynamicKey", + }, + expected: map[string]interface{}{ + "dynamicKey": "value", + }, + }, + { + name: "map[any]any conversion", + build: map[interface{}]interface{}{ + "key1": "value1", + "key2": 123, + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "key1": "value1", + "key2": 123, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create evaluator context + evalCtx := criteria.NewEvaluationContext() + for k, v := range tt.params { + evalCtx.Set(k, v) + } + evaluator := criteria.NewEvaluator(evalCtx, log) + + result, err := buildPayload(tt.build, evaluator, tt.params, log) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + 
}) + } +} + +func TestBuildMapPayload(t *testing.T) { + log := &testLogger{} + + tests := []struct { + name string + input map[string]interface{} + params map[string]interface{} + expected map[string]interface{} + expectError bool + }{ + { + name: "empty map", + input: map[string]interface{}{}, + params: map[string]interface{}{}, + expected: map[string]interface{}{}, + }, + { + name: "simple key-value pairs", + input: map[string]interface{}{ + "status": "active", + "count": 10, + "enabled": true, + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "status": "active", + "count": 10, + "enabled": true, + }, + }, + { + name: "template in value", + input: map[string]interface{}{ + "message": "Hello {{ .name }}", + }, + params: map[string]interface{}{ + "name": "World", + }, + expected: map[string]interface{}{ + "message": "Hello World", + }, + }, + { + name: "nested map", + input: map[string]interface{}{ + "outer": map[string]interface{}{ + "inner": "value", + }, + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "outer": map[string]interface{}{ + "inner": "value", + }, + }, + }, + { + name: "value definition", + input: map[string]interface{}{ + "field": map[string]interface{}{ + "value": "static value", + }, + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "field": "static value", + }, + }, + { + name: "value definition with template", + input: map[string]interface{}{ + "field": map[string]interface{}{ + "value": "cluster-{{ .id }}", + }, + }, + params: map[string]interface{}{ + "id": "123", + }, + expected: map[string]interface{}{ + "field": "cluster-123", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + evalCtx := criteria.NewEvaluationContext() + for k, v := range tt.params { + evalCtx.Set(k, v) + } + evaluator := criteria.NewEvaluator(evalCtx, log) + + result, err := buildMapPayload(tt.input, evaluator, tt.params, log) + + if tt.expectError { + 
assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestProcessValue(t *testing.T) { + log := &testLogger{} + + tests := []struct { + name string + value interface{} + params map[string]interface{} + evalCtxData map[string]interface{} + expected interface{} + expectError bool + }{ + { + name: "string without template", + value: "plain string", + params: map[string]interface{}{}, + expected: "plain string", + }, + { + name: "string with template", + value: "Hello {{ .name }}", + params: map[string]interface{}{"name": "World"}, + expected: "Hello World", + }, + { + name: "integer passthrough", + value: 42, + params: map[string]interface{}{}, + expected: 42, + }, + { + name: "boolean passthrough", + value: true, + params: map[string]interface{}{}, + expected: true, + }, + { + name: "float passthrough", + value: 3.14, + params: map[string]interface{}{}, + expected: 3.14, + }, + { + name: "expression evaluation", + value: map[string]interface{}{ + "expression": "1 + 2", + }, + params: map[string]interface{}{}, + evalCtxData: map[string]interface{}{}, + expected: int64(3), + }, + { + name: "expression with context variable", + value: map[string]interface{}{ + "expression": "count * 2", + }, + params: map[string]interface{}{}, + evalCtxData: map[string]interface{}{"count": 5}, + expected: int64(10), + }, + { + name: "value definition", + value: map[string]interface{}{ + "value": "static", + }, + params: map[string]interface{}{}, + expected: "static", + }, + { + name: "value definition with non-string", + value: map[string]interface{}{ + "value": 123, + }, + params: map[string]interface{}{}, + expected: 123, + }, + { + name: "slice processing", + value: []interface{}{"a", "b", "c"}, + params: map[string]interface{}{}, + expected: []interface{}{"a", "b", "c"}, + }, + { + name: "slice with templates", + value: []interface{}{ + "{{ .prefix }}-1", + "{{ .prefix }}-2", + }, + params: 
map[string]interface{}{"prefix": "item"}, + expected: []interface{}{ + "item-1", + "item-2", + }, + }, + { + name: "map[any]any conversion", + value: map[interface{}]interface{}{ + "key": "value", + }, + params: map[string]interface{}{}, + expected: map[string]interface{}{ + "key": "value", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + evalCtx := criteria.NewEvaluationContext() + for k, v := range tt.evalCtxData { + evalCtx.Set(k, v) + } + evaluator := criteria.NewEvaluator(evalCtx, log) + + result, err := processValue(tt.value, evaluator, tt.params, log) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestPostActionExecutor_ExecuteAll(t *testing.T) { + tests := []struct { + name string + postConfig *config_loader.PostConfig + mockResponse *hyperfleet_api.Response + expectedResults int + expectError bool + }{ + { + name: "nil post config", + postConfig: nil, + expectedResults: 0, + expectError: false, + }, + { + name: "empty post actions", + postConfig: &config_loader.PostConfig{ + PostActions: []config_loader.PostAction{}, + }, + expectedResults: 0, + expectError: false, + }, + { + name: "single log action", + postConfig: &config_loader.PostConfig{ + PostActions: []config_loader.PostAction{ + { + Name: "log-status", + Log: &config_loader.LogAction{Message: "Processing complete", Level: "info"}, + }, + }, + }, + expectedResults: 1, + expectError: false, + }, + { + name: "multiple log actions", + postConfig: &config_loader.PostConfig{ + PostActions: []config_loader.PostAction{ + {Name: "log1", Log: &config_loader.LogAction{Message: "Step 1", Level: "info"}}, + {Name: "log2", Log: &config_loader.LogAction{Message: "Step 2", Level: "info"}}, + {Name: "log3", Log: &config_loader.LogAction{Message: "Step 3", Level: "info"}}, + }, + }, + expectedResults: 3, + expectError: false, + }, + { + name: "with payloads", + postConfig: 
&config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "statusPayload", + Build: map[string]interface{}{ + "status": "completed", + }, + }, + }, + PostActions: []config_loader.PostAction{ + {Name: "log1", Log: &config_loader.LogAction{Message: "Done", Level: "info"}}, + }, + }, + expectedResults: 1, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := hyperfleet_api.NewMockClient() + if tt.mockResponse != nil { + mockClient.DoResponse = tt.mockResponse + } + + pae := NewPostActionExecutor(mockClient) + + evt := event.New() + evt.SetID("test-event") + execCtx := NewExecutionContext(context.Background(), &evt, map[string]interface{}{}) + + log := &testLogger{} + + results, err := pae.ExecuteAll( + context.Background(), + tt.postConfig, + execCtx, + log, + ) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Len(t, results, tt.expectedResults) + }) + } +} + +func TestExecuteAPICall(t *testing.T) { + tests := []struct { + name string + apiCall *config_loader.APICall + params map[string]interface{} + mockResponse *hyperfleet_api.Response + mockError error + expectError bool + expectedURL string + }{ + { + name: "nil api call", + apiCall: nil, + params: map[string]interface{}{}, + expectError: true, + }, + { + name: "simple GET request", + apiCall: &config_loader.APICall{ + Method: "GET", + URL: "http://api.example.com/clusters", + }, + params: map[string]interface{}{}, + mockResponse: &hyperfleet_api.Response{ + StatusCode: http.StatusOK, + Status: "200 OK", + Body: []byte(`{"status":"ok"}`), + }, + expectError: false, + expectedURL: "http://api.example.com/clusters", + }, + { + name: "GET request with URL template", + apiCall: &config_loader.APICall{ + Method: "GET", + URL: "http://api.example.com/clusters/{{ .clusterId }}", + }, + params: map[string]interface{}{ + "clusterId": "cluster-123", + }, + mockResponse: &hyperfleet_api.Response{ + 
StatusCode: http.StatusOK, + Status: "200 OK", + Body: []byte(`{}`), + }, + expectError: false, + expectedURL: "http://api.example.com/clusters/cluster-123", + }, + { + name: "POST request with body", + apiCall: &config_loader.APICall{ + Method: "POST", + URL: "http://api.example.com/clusters", + Body: `{"name": "{{ .name }}"}`, + }, + params: map[string]interface{}{ + "name": "new-cluster", + }, + mockResponse: &hyperfleet_api.Response{ + StatusCode: http.StatusCreated, + Status: "201 Created", + }, + expectError: false, + expectedURL: "http://api.example.com/clusters", + }, + { + name: "PUT request", + apiCall: &config_loader.APICall{ + Method: "PUT", + URL: "http://api.example.com/clusters/{{ .id }}", + Body: `{"status": "updated"}`, + }, + params: map[string]interface{}{"id": "123"}, + mockResponse: &hyperfleet_api.Response{ + StatusCode: http.StatusOK, + Status: "200 OK", + }, + expectError: false, + expectedURL: "http://api.example.com/clusters/123", + }, + { + name: "PATCH request", + apiCall: &config_loader.APICall{ + Method: "PATCH", + URL: "http://api.example.com/clusters/123", + Body: `{"field": "value"}`, + }, + params: map[string]interface{}{}, + mockResponse: &hyperfleet_api.Response{ + StatusCode: http.StatusOK, + Status: "200 OK", + }, + expectError: false, + expectedURL: "http://api.example.com/clusters/123", + }, + { + name: "DELETE request", + apiCall: &config_loader.APICall{ + Method: "DELETE", + URL: "http://api.example.com/clusters/123", + }, + params: map[string]interface{}{}, + mockResponse: &hyperfleet_api.Response{ + StatusCode: http.StatusNoContent, + Status: "204 No Content", + }, + expectError: false, + expectedURL: "http://api.example.com/clusters/123", + }, + { + name: "unsupported HTTP method", + apiCall: &config_loader.APICall{ + Method: "INVALID", + URL: "http://api.example.com/test", + }, + params: map[string]interface{}{}, + expectError: true, + }, + { + name: "request with headers", + apiCall: &config_loader.APICall{ + Method: 
"GET", + URL: "http://api.example.com/clusters", + Headers: []config_loader.Header{ + {Name: "Authorization", Value: "Bearer {{ .token }}"}, + {Name: "X-Request-ID", Value: "{{ .requestId }}"}, + }, + }, + params: map[string]interface{}{ + "token": "secret-token", + "requestId": "req-123", + }, + mockResponse: &hyperfleet_api.Response{ + StatusCode: http.StatusOK, + Status: "200 OK", + }, + expectError: false, + expectedURL: "http://api.example.com/clusters", + }, + { + name: "request with timeout", + apiCall: &config_loader.APICall{ + Method: "GET", + URL: "http://api.example.com/slow", + Timeout: "30s", + }, + params: map[string]interface{}{}, + mockResponse: &hyperfleet_api.Response{ + StatusCode: http.StatusOK, + Status: "200 OK", + }, + expectError: false, + expectedURL: "http://api.example.com/slow", + }, + { + name: "request with retry config", + apiCall: &config_loader.APICall{ + Method: "GET", + URL: "http://api.example.com/flaky", + RetryAttempts: 3, + RetryBackoff: "exponential", + }, + params: map[string]interface{}{}, + mockResponse: &hyperfleet_api.Response{ + StatusCode: http.StatusOK, + Status: "200 OK", + }, + expectError: false, + expectedURL: "http://api.example.com/flaky", + }, + { + name: "URL template error", + apiCall: &config_loader.APICall{ + Method: "GET", + URL: "http://api.example.com/{{ .missing }}", + }, + params: map[string]interface{}{}, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := hyperfleet_api.NewMockClient() + if tt.mockResponse != nil { + mockClient.DoResponse = tt.mockResponse + } + if tt.mockError != nil { + mockClient.DoError = tt.mockError + } + + evt := event.New() + execCtx := NewExecutionContext(context.Background(), &evt, map[string]interface{}{}) + execCtx.Params = tt.params + + log := &testLogger{} + + resp, url, err := ExecuteAPICall( + context.Background(), + tt.apiCall, + execCtx, + mockClient, + log, + ) + + if tt.expectError { + assert.Error(t, 
err) + return + } + + require.NoError(t, err) + assert.NotNil(t, resp) + assert.Equal(t, tt.expectedURL, url) + }) + } +} + diff --git a/internal/executor/precondition_executor.go b/internal/executor/precondition_executor.go new file mode 100644 index 0000000..4e24c01 --- /dev/null +++ b/internal/executor/precondition_executor.go @@ -0,0 +1,244 @@ +package executor + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// PreconditionExecutor evaluates preconditions +type PreconditionExecutor struct { + apiClient hyperfleet_api.Client +} + +// NewPreconditionExecutor creates a new precondition executor +func NewPreconditionExecutor(apiClient hyperfleet_api.Client) *PreconditionExecutor { + return &PreconditionExecutor{ + apiClient: apiClient, + } +} + +// ExecuteAll executes all preconditions in sequence +// Returns a high-level outcome with match status and individual results +func (pe *PreconditionExecutor) ExecuteAll(ctx context.Context, preconditions []config_loader.Precondition, execCtx *ExecutionContext, log logger.Logger) *PreconditionsOutcome { + results := make([]PreconditionResult, 0, len(preconditions)) + + for _, precond := range preconditions { + result, err := pe.executePrecondition(ctx, precond, execCtx, log) + results = append(results, result) + + if err != nil { + // Execution error (API call failed, parse error, etc.) 
+ return &PreconditionsOutcome{ + AllMatched: false, + Results: results, + Error: err, + } + } + + if !result.Matched { + // Business outcome: precondition not satisfied + return &PreconditionsOutcome{ + AllMatched: false, + Results: results, + Error: nil, + NotMetReason: fmt.Sprintf("precondition '%s' not met: %s", precond.Name, formatConditionDetails(result)), + } + } + } + + // All preconditions matched + return &PreconditionsOutcome{ + AllMatched: true, + Results: results, + Error: nil, + } +} + +// executePrecondition executes a single precondition +func (pe *PreconditionExecutor) executePrecondition(ctx context.Context, precond config_loader.Precondition, execCtx *ExecutionContext, log logger.Logger) (PreconditionResult, error) { + result := PreconditionResult{ + Name: precond.Name, + Status: StatusSuccess, + CapturedFields: make(map[string]interface{}), + } + + log.Infof("Evaluating precondition: %s", precond.Name) + + // Step 1: Execute log action if configured + if precond.Log != nil { + ExecuteLogAction(precond.Log, execCtx, log) + } + + // Step 2: Make API call if configured + if precond.APICall != nil { + apiResult, err := pe.executeAPICall(ctx, precond.APICall, execCtx, log) + if err != nil { + result.Status = StatusFailed + result.Error = err + + // Set ExecutionError for API call failure + execCtx.Adapter.ExecutionError = &ExecutionError{ + Phase: string(PhasePreconditions), + Step: precond.Name, + Message: err.Error(), + } + + return result, NewExecutorError(PhasePreconditions, precond.Name, "API call failed", err) + } + result.APICallMade = true + result.APIResponse = apiResult + + // Parse response as JSON + var responseData map[string]interface{} + if err := json.Unmarshal(apiResult, &responseData); err != nil { + result.Status = StatusFailed + result.Error = fmt.Errorf("failed to parse API response as JSON: %w", err) + + // Set ExecutionError for parse failure + execCtx.Adapter.ExecutionError = &ExecutionError{ + Phase: 
string(PhasePreconditions), + Step: precond.Name, + Message: err.Error(), + } + + return result, NewExecutorError(PhasePreconditions, precond.Name, "failed to parse API response", err) + } + + // Capture fields from response + if len(precond.Capture) > 0 { + for _, capture := range precond.Capture { + value, err := captureFieldFromData(responseData, capture.Field) + if err != nil { + log.Warning(fmt.Sprintf("Failed to capture field '%s' as '%s': %v", capture.Field, capture.Name, err)) + continue + } + result.CapturedFields[capture.Name] = value + execCtx.Params[capture.Name] = value + } + } + } + + // Step 3: Evaluate conditions + // Create evaluation context with all params + evalCtx := criteria.NewEvaluationContext() + evalCtx.SetVariablesFromMap(execCtx.Params) + + evaluator := criteria.NewEvaluator(evalCtx, log) + + // Evaluate using structured conditions or CEL expression + if len(precond.Conditions) > 0 { + condDefs := ToConditionDefs(precond.Conditions) + + condResult, err := evaluator.EvaluateConditionsWithResult(condDefs) + if err != nil { + result.Status = StatusFailed + result.Error = err + return result, NewExecutorError(PhasePreconditions, precond.Name, "condition evaluation failed", err) + } + + result.Matched = condResult.Matched + result.ConditionResults = condResult.Results + + // Record evaluation in execution context - reuse criteria.EvaluationResult directly + fieldResults := make(map[string]criteria.EvaluationResult, len(condResult.Results)) + for _, cr := range condResult.Results { + fieldResults[cr.Field] = cr + } + execCtx.AddConditionsEvaluation(PhasePreconditions, precond.Name, condResult.Matched, fieldResults) + } else if precond.Expression != "" { + // Evaluate CEL expression + celResult, err := evaluator.EvaluateCEL(strings.TrimSpace(precond.Expression)) + if err != nil { + result.Status = StatusFailed + result.Error = err + return result, NewExecutorError(PhasePreconditions, precond.Name, "CEL expression evaluation failed", err) + } + 
+ result.Matched = celResult.Matched + result.CELResult = celResult + + // Record CEL evaluation in execution context + execCtx.AddCELEvaluation(PhasePreconditions, precond.Name, precond.Expression, celResult.Matched) + } else { + // No conditions specified - consider it matched + result.Matched = true + } + + if result.Matched { + log.Infof("Precondition '%s' satisfied", precond.Name) + } else { + log.Warning(fmt.Sprintf("Precondition '%s' not satisfied", precond.Name)) + } + + return result, nil +} + +// executeAPICall executes an API call and returns the response body for field capture +func (pe *PreconditionExecutor) executeAPICall(ctx context.Context, apiCall *config_loader.APICall, execCtx *ExecutionContext, log logger.Logger) ([]byte, error) { + resp, url, err := ExecuteAPICall(ctx, apiCall, execCtx, pe.apiClient, log) + + // Validate response - returns APIError with full metadata if validation fails + if validationErr := ValidateAPIResponse(resp, err, apiCall.Method, url); validationErr != nil { + return nil, validationErr + } + + return resp.Body, nil +} + +// captureFieldFromData captures a field from API response data using dot notation +func captureFieldFromData(data map[string]interface{}, path string) (interface{}, error) { + parts := strings.Split(path, ".") + var current interface{} = data + + for i, part := range parts { + switch v := current.(type) { + case map[string]interface{}: + val, ok := v[part] + if !ok { + return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) + } + current = val + case map[interface{}]interface{}: + val, ok := v[part] + if !ok { + return nil, fmt.Errorf("field '%s' not found at path '%s'", part, strings.Join(parts[:i+1], ".")) + } + current = val + default: + return nil, fmt.Errorf("cannot access field '%s': parent is not a map (got %T)", part, current) + } + } + + return current, nil +} + +// formatConditionDetails formats condition evaluation details for error messages +func 
formatConditionDetails(result PreconditionResult) string { + var details []string + + if result.CELResult != nil && result.CELResult.HasError() { + details = append(details, fmt.Sprintf("CEL error: %s", result.CELResult.ErrorReason)) + } + + for _, condResult := range result.ConditionResults { + if !condResult.Matched { + details = append(details, fmt.Sprintf("%s %s %v (actual: %v)", + condResult.Field, condResult.Operator, condResult.ExpectedValue, condResult.FieldValue)) + } + } + + if len(details) == 0 { + return "no specific details available" + } + + return strings.Join(details, "; ") +} + + diff --git a/internal/executor/resource_executor.go b/internal/executor/resource_executor.go new file mode 100644 index 0000000..8d39562 --- /dev/null +++ b/internal/executor/resource_executor.go @@ -0,0 +1,501 @@ +package executor + +import ( + "context" + "fmt" + "sort" + "strconv" + "time" + + "github.com/mitchellh/copystructure" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + apperrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ResourceExecutor creates and updates Kubernetes resources +type ResourceExecutor struct { + k8sClient k8s_client.K8sClient +} + +// NewResourceExecutor creates a new resource executor +func NewResourceExecutor(k8sClient k8s_client.K8sClient) *ResourceExecutor { + return &ResourceExecutor{ + k8sClient: k8sClient, + } +} + +// ExecuteAll creates/updates all resources in sequence +// Returns results for each resource and updates the execution context +func (re *ResourceExecutor) ExecuteAll(ctx context.Context, resources []config_loader.Resource, execCtx *ExecutionContext, log logger.Logger) 
([]ResourceResult, error) { + if execCtx.Resources == nil { + execCtx.Resources = make(map[string]*unstructured.Unstructured) + } + results := make([]ResourceResult, 0, len(resources)) + + for _, resource := range resources { + result, err := re.executeResource(ctx, resource, execCtx, log) + results = append(results, result) + + if err != nil { + return results, err + } + } + + return results, nil +} + +// executeResource creates or updates a single Kubernetes resource +func (re *ResourceExecutor) executeResource(ctx context.Context, resource config_loader.Resource, execCtx *ExecutionContext, log logger.Logger) (ResourceResult, error) { + result := ResourceResult{ + Name: resource.Name, + Status: StatusSuccess, + } + + log.Infof("Processing resource: %s", resource.Name) + + // Step 1: Build the manifest + manifest, err := re.buildManifest(resource, execCtx, log) + if err != nil { + result.Status = StatusFailed + result.Error = err + return result, NewExecutorError(PhaseResources, resource.Name, "failed to build manifest", err) + } + + // Extract resource info + gvk := manifest.GroupVersionKind() + result.Kind = gvk.Kind + result.Namespace = manifest.GetNamespace() + result.ResourceName = manifest.GetName() + + log.Infof("Manifest built: %s %s/%s (namespace: %s)", + gvk.Kind, gvk.Group, manifest.GetName(), manifest.GetNamespace()) + + // Step 2: Check for existing resource using discovery + var existingResource *unstructured.Unstructured + if resource.Discovery != nil { + existingResource, err = re.discoverExistingResource(ctx, gvk, resource.Discovery, execCtx) + if err != nil && !apierrors.IsNotFound(err) { + if apperrors.IsRetryableDiscoveryError(err) { + // Transient/network error - log and continue, we'll try to create + log.Warning(fmt.Sprintf("Transient discovery error (continuing): %v", err)) + } else { + // Fatal error (auth, permission, validation) - fail fast + result.Status = StatusFailed + result.Error = err + return result, 
NewExecutorError(PhaseResources, resource.Name, "failed to discover existing resource", err) + } + } + } + + // Step 3: Perform the appropriate operation + if existingResource != nil { + // Resource exists - update or recreate + if resource.RecreateOnChange { + result.Operation = OperationRecreate + result.Resource, err = re.recreateResource(ctx, existingResource, manifest, log) + } else { + result.Operation = OperationUpdate + result.Resource, err = re.updateResource(ctx, existingResource, manifest) + } + } else { + // Create new resource + result.Operation = OperationCreate + result.Resource, err = re.createResource(ctx, manifest) + } + + if err != nil { + result.Status = StatusFailed + result.Error = err + + // Set ExecutionError for K8s operation failure + execCtx.Adapter.ExecutionError = &ExecutionError{ + Phase: string(PhaseResources), + Step: resource.Name, + Message: err.Error(), + } + + return result, NewExecutorError(PhaseResources, resource.Name, + fmt.Sprintf("failed to %s resource", result.Operation), err) + } + + // Store resource in execution context + if result.Resource != nil { + execCtx.Resources[resource.Name] = result.Resource + } + + log.Infof("Resource %s completed: %s %s/%s (operation: %s)", + resource.Name, result.Kind, result.Namespace, result.ResourceName, result.Operation) + + return result, nil +} + +// buildManifest builds an unstructured manifest from the resource configuration +func (re *ResourceExecutor) buildManifest(resource config_loader.Resource, execCtx *ExecutionContext, log logger.Logger) (*unstructured.Unstructured, error) { + var manifestData map[string]interface{} + + // Check if manifest is inline or from ManifestItems (loaded from ref) + if len(resource.ManifestItems) > 0 { + // Use first manifest item (loaded from ref file) + manifestData = resource.ManifestItems[0] + } else if resource.Manifest != nil { + // Use inline manifest + switch m := resource.Manifest.(type) { + case map[string]interface{}: + manifestData = m + 
case map[interface{}]interface{}: + manifestData = convertToStringKeyMap(m) + default: + return nil, fmt.Errorf("unsupported manifest type: %T", resource.Manifest) + } + } else { + return nil, fmt.Errorf("no manifest specified for resource %s", resource.Name) + } + + // Deep copy to avoid modifying the original + manifestData = deepCopyMap(manifestData, log) + + // Render all template strings in the manifest + renderedData, err := renderManifestTemplates(manifestData, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render manifest templates: %w", err) + } + + // Convert to unstructured + obj := &unstructured.Unstructured{Object: renderedData} + + // Validate required fields + if obj.GetAPIVersion() == "" { + return nil, fmt.Errorf("manifest missing apiVersion") + } + if obj.GetKind() == "" { + return nil, fmt.Errorf("manifest missing kind") + } + if obj.GetName() == "" { + return nil, fmt.Errorf("manifest missing metadata.name") + } + + return obj, nil +} + +// discoverExistingResource discovers an existing resource using the discovery config +func (re *ResourceExecutor) discoverExistingResource(ctx context.Context, gvk schema.GroupVersionKind, discovery *config_loader.DiscoveryConfig, execCtx *ExecutionContext) (*unstructured.Unstructured, error) { + if re.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured") + } + + // Render discovery config templates + namespace, err := renderTemplate(discovery.Namespace, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render namespace template: %w", err) + } + + // Check if discovering by name + if discovery.ByName != "" { + name, err := renderTemplate(discovery.ByName, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render byName template: %w", err) + } + return re.k8sClient.GetResource(ctx, gvk, namespace, name) + } + + // Discover by label selector + if discovery.BySelectors != nil && len(discovery.BySelectors.LabelSelector) > 0 { + 
// Render label selector templates + renderedLabels := make(map[string]string) + for k, v := range discovery.BySelectors.LabelSelector { + renderedK, err := renderTemplate(k, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render label key template: %w", err) + } + renderedV, err := renderTemplate(v, execCtx.Params) + if err != nil { + return nil, fmt.Errorf("failed to render label value template: %w", err) + } + renderedLabels[renderedK] = renderedV + } + + labelSelector := k8s_client.BuildLabelSelector(renderedLabels) + + discoveryConfig := &k8s_client.DiscoveryConfig{ + Namespace: namespace, + LabelSelector: labelSelector, + } + + list, err := re.k8sClient.DiscoverResources(ctx, gvk, discoveryConfig) + if err != nil { + return nil, err + } + + if len(list.Items) == 0 { + return nil, apierrors.NewNotFound(schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind}, "") + } + + // Sort by generation annotation (descending) to return the one with the latest generation + // This ensures deterministic behavior when multiple resources match the label selector + // Secondary sort by metadata.name for consistency when generations are equal + sort.Slice(list.Items, func(i, j int) bool { + genI := getGenerationAnnotationValue(&list.Items[i]) + genJ := getGenerationAnnotationValue(&list.Items[j]) + if genI != genJ { + return genI > genJ // Descending order - latest generation first + } + // Fall back to metadata.name for deterministic ordering when generations are equal + return list.Items[i].GetName() < list.Items[j].GetName() + }) + + return &list.Items[0], nil + } + + return nil, fmt.Errorf("discovery config must specify byName or bySelectors") +} + +// createResource creates a new Kubernetes resource +func (re *ResourceExecutor) createResource(ctx context.Context, manifest *unstructured.Unstructured) (*unstructured.Unstructured, error) { + if re.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured") + } + + return 
re.k8sClient.CreateResource(ctx, manifest) +} + +// updateResource updates an existing Kubernetes resource +func (re *ResourceExecutor) updateResource(ctx context.Context, existing, manifest *unstructured.Unstructured) (*unstructured.Unstructured, error) { + if re.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured") + } + + // Preserve resourceVersion from existing for update + manifest.SetResourceVersion(existing.GetResourceVersion()) + manifest.SetUID(existing.GetUID()) + + return re.k8sClient.UpdateResource(ctx, manifest) +} + +// recreateResource deletes and recreates a Kubernetes resource +// It waits for the resource to be fully deleted before creating the new one +// to avoid race conditions with Kubernetes asynchronous deletion +func (re *ResourceExecutor) recreateResource(ctx context.Context, existing, manifest *unstructured.Unstructured, log logger.Logger) (*unstructured.Unstructured, error) { + if re.k8sClient == nil { + return nil, fmt.Errorf("kubernetes client not configured") + } + + gvk := existing.GroupVersionKind() + namespace := existing.GetNamespace() + name := existing.GetName() + + // Delete the existing resource + log.Infof("Deleting resource for recreation: %s/%s", gvk.Kind, name) + if err := re.k8sClient.DeleteResource(ctx, gvk, namespace, name); err != nil { + return nil, fmt.Errorf("failed to delete resource for recreation: %w", err) + } + + // Wait for the resource to be fully deleted + log.Infof("Waiting for resource deletion to complete: %s/%s", gvk.Kind, name) + if err := re.waitForDeletion(ctx, gvk, namespace, name, log); err != nil { + return nil, fmt.Errorf("failed waiting for resource deletion: %w", err) + } + + // Create the new resource + log.Infof("Creating new resource after deletion confirmed: %s/%s", gvk.Kind, manifest.GetName()) + return re.k8sClient.CreateResource(ctx, manifest) +} + +// waitForDeletion polls until the resource is confirmed deleted or context times out +// Returns nil when the 
resource is confirmed gone (NotFound), or an error otherwise +func (re *ResourceExecutor) waitForDeletion(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string, log logger.Logger) error { + const pollInterval = 100 * time.Millisecond + + ticker := time.NewTicker(pollInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + log.Warning(fmt.Sprintf("Context cancelled/timed out while waiting for deletion of %s/%s", gvk.Kind, name)) + return fmt.Errorf("context cancelled while waiting for resource deletion: %w", ctx.Err()) + case <-ticker.C: + _, err := re.k8sClient.GetResource(ctx, gvk, namespace, name) + if err != nil { + // NotFound means the resource is deleted - this is success + if apierrors.IsNotFound(err) { + log.Infof("Resource deletion confirmed: %s/%s", gvk.Kind, name) + return nil + } + // Any other error is unexpected + log.Error(fmt.Sprintf("Error checking resource deletion status for %s/%s: %v", gvk.Kind, name, err)) + return fmt.Errorf("error checking deletion status: %w", err) + } + // Resource still exists, continue polling + log.V(2).Infof("Resource %s/%s still exists, waiting for deletion...", gvk.Kind, name) + } + } +} + +// convertToStringKeyMap converts map[interface{}]interface{} to map[string]interface{} +func convertToStringKeyMap(m map[interface{}]interface{}) map[string]interface{} { + result := make(map[string]interface{}) + for k, v := range m { + strKey := fmt.Sprintf("%v", k) + switch val := v.(type) { + case map[interface{}]interface{}: + result[strKey] = convertToStringKeyMap(val) + case []interface{}: + result[strKey] = convertSlice(val) + default: + result[strKey] = v + } + } + return result +} + +// convertSlice converts slice elements recursively +func convertSlice(s []interface{}) []interface{} { + result := make([]interface{}, len(s)) + for i, v := range s { + switch val := v.(type) { + case map[interface{}]interface{}: + result[i] = convertToStringKeyMap(val) + case []interface{}: + result[i] 
= convertSlice(val) + default: + result[i] = v + } + } + return result +} + +// deepCopyMap creates a deep copy of a map using github.com/mitchellh/copystructure. +// This handles non-JSON-serializable types (channels, functions, time.Time, etc.) +// and preserves type information (e.g., int64 stays int64, not float64). +// If deep copy fails, it falls back to a shallow copy and logs a warning. +// WARNING: Shallow copy means nested maps/slices will share references with the original, +// which could lead to unexpected mutations. +func deepCopyMap(m map[string]interface{}, log logger.Logger) map[string]interface{} { + if m == nil { + return nil + } + + copied, err := copystructure.Copy(m) + if err != nil { + // Fallback to shallow copy - LOG WARNING + if log != nil { + log.Warning(fmt.Sprintf("deepCopyMap: deep copy failed, falling back to shallow copy (mutations may affect original): %v", err)) + } + result := make(map[string]interface{}) + for k, v := range m { + result[k] = v + } + return result + } + + result, ok := copied.(map[string]interface{}) + if !ok { + // Should not happen, but handle gracefully + if log != nil { + log.Warning(fmt.Sprintf("deepCopyMap: unexpected type after copy (%T), falling back to shallow copy", copied)) + } + result := make(map[string]interface{}) + for k, v := range m { + result[k] = v + } + return result + } + + return result +} + +// renderManifestTemplates recursively renders all template strings in a manifest +func renderManifestTemplates(data map[string]interface{}, params map[string]interface{}) (map[string]interface{}, error) { + result := make(map[string]interface{}) + + for k, v := range data { + renderedKey, err := renderTemplate(k, params) + if err != nil { + return nil, fmt.Errorf("failed to render key '%s': %w", k, err) + } + + renderedValue, err := renderValue(v, params) + if err != nil { + return nil, fmt.Errorf("failed to render value for key '%s': %w", k, err) + } + + result[renderedKey] = renderedValue + } + + 
return result, nil +} + +// renderValue renders a value recursively +func renderValue(v interface{}, params map[string]interface{}) (interface{}, error) { + switch val := v.(type) { + case string: + return renderTemplate(val, params) + case map[string]interface{}: + return renderManifestTemplates(val, params) + case map[interface{}]interface{}: + converted := convertToStringKeyMap(val) + return renderManifestTemplates(converted, params) + case []interface{}: + result := make([]interface{}, len(val)) + for i, item := range val { + rendered, err := renderValue(item, params) + if err != nil { + return nil, err + } + result[i] = rendered + } + return result, nil + default: + return v, nil + } +} + +// getGenerationAnnotationValue extracts the generation annotation value from a resource +// Returns 0 if the resource is nil, has no annotations, or the annotation cannot be parsed +func getGenerationAnnotationValue(obj *unstructured.Unstructured) int64 { + if obj == nil { + return 0 + } + annotations := obj.GetAnnotations() + if annotations == nil { + return 0 + } + genStr, ok := annotations[AnnotationGeneration] + if !ok || genStr == "" { + return 0 + } + // Try to parse as integer directly + gen, err := strconv.ParseInt(genStr, 10, 64) + if err != nil { + // Generation value is not a valid integer, return 0 + return 0 + } + return gen +} + +// GetResourceAsMap converts an unstructured resource to a map for CEL evaluation +func GetResourceAsMap(resource *unstructured.Unstructured) map[string]interface{} { + if resource == nil { + return nil + } + return resource.Object +} + +// BuildResourcesMap builds a map of all resources for CEL evaluation. +// Resource names are used directly as keys (snake_case and camelCase both work in CEL). +// Name validation (no hyphens, no duplicates) is done at config load time. 
+func BuildResourcesMap(resources map[string]*unstructured.Unstructured) map[string]interface{} { + result := make(map[string]interface{}) + for name, resource := range resources { + if resource != nil { + result[name] = resource.Object + } + } + return result +} + diff --git a/internal/executor/resource_executor_test.go b/internal/executor/resource_executor_test.go new file mode 100644 index 0000000..3e27a58 --- /dev/null +++ b/internal/executor/resource_executor_test.go @@ -0,0 +1,263 @@ +package executor + +import ( + "context" + "testing" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/stretchr/testify/assert" +) + + +func TestDeepCopyMap_BasicTypes(t *testing.T) { + log := &mockLogger{} + + original := map[string]interface{}{ + "string": "hello", + "int": 42, + "float": 3.14, + "bool": true, + "null": nil, + } + + copied := deepCopyMap(original, log) + + // Verify values are copied correctly + assert.Equal(t, "hello", copied["string"]) + assert.Equal(t, 42, copied["int"]) // copystructure preserves int (unlike JSON which converts to float64) + assert.Equal(t, 3.14, copied["float"]) + assert.Equal(t, true, copied["bool"]) + assert.Nil(t, copied["null"]) + + // Verify no warnings logged + assert.Empty(t, log.warnings, "No warnings expected for basic types") + + // Verify mutation doesn't affect original + copied["string"] = "modified" + assert.Equal(t, "hello", original["string"], "Original should not be modified") +} + +func TestDeepCopyMap_NestedMaps(t *testing.T) { + log := &mockLogger{} + + original := map[string]interface{}{ + "level1": map[string]interface{}{ + "level2": map[string]interface{}{ + "value": "deep", + }, + }, + } + + copied := deepCopyMap(original, log) + + // Verify deep copy works + assert.Empty(t, log.warnings) + + // Modify the copied nested map + level1 := copied["level1"].(map[string]interface{}) + level2 := level1["level2"].(map[string]interface{}) + level2["value"] = "modified" + + // Verify original 
is NOT modified (deep copy worked) + originalLevel1 := original["level1"].(map[string]interface{}) + originalLevel2 := originalLevel1["level2"].(map[string]interface{}) + assert.Equal(t, "deep", originalLevel2["value"], "Original nested value should not be modified") +} + +func TestDeepCopyMap_Slices(t *testing.T) { + log := &mockLogger{} + + original := map[string]interface{}{ + "items": []interface{}{"a", "b", "c"}, + "nested": []interface{}{ + map[string]interface{}{"key": "value"}, + }, + } + + copied := deepCopyMap(original, log) + + assert.Empty(t, log.warnings) + + // Modify copied slice + copiedItems := copied["items"].([]interface{}) + copiedItems[0] = "modified" + + // Verify original is NOT modified + originalItems := original["items"].([]interface{}) + assert.Equal(t, "a", originalItems[0], "Original slice should not be modified") +} + +func TestDeepCopyMap_Channel(t *testing.T) { + // copystructure handles channels properly (creates new channel) + log := &mockLogger{} + + ch := make(chan int, 5) + original := map[string]interface{}{ + "channel": ch, + "normal": "value", + } + + copied := deepCopyMap(original, log) + + // copystructure handles channels - no warning expected + assert.Empty(t, log.warnings, "copystructure handles channels without falling back to shallow copy") + + // Normal values are copied + assert.Equal(t, "value", copied["normal"]) + + // Verify channel exists in copied map + copiedCh, ok := copied["channel"].(chan int) + assert.True(t, ok, "Channel should be present in copied map") + assert.NotNil(t, copiedCh, "Copied channel should not be nil") +} + +func TestDeepCopyMap_Function(t *testing.T) { + // copystructure handles functions (copies the function pointer) + log := &mockLogger{} + + fn := func() string { return "hello" } + original := map[string]interface{}{ + "func": fn, + "normal": "value", + } + + copied := deepCopyMap(original, log) + + // copystructure handles functions - no warning expected + assert.Empty(t, log.warnings, 
"copystructure handles functions without falling back to shallow copy") + + // Normal values are copied + assert.Equal(t, "value", copied["normal"]) + + // Function is preserved + copiedFn := copied["func"].(func() string) + assert.Equal(t, "hello", copiedFn(), "Copied function should work") +} + +func TestDeepCopyMap_NestedWithChannel(t *testing.T) { + // Test that nested maps are deep copied even when channels are present + log := &mockLogger{} + + ch := make(chan int) + nested := map[string]interface{}{"mutable": "original"} + original := map[string]interface{}{ + "channel": ch, + "nested": nested, + } + + copied := deepCopyMap(original, log) + + // copystructure handles this properly - no warning expected + assert.Empty(t, log.warnings) + + // Modify the copied nested map + copiedNested := copied["nested"].(map[string]interface{}) + copiedNested["mutable"] = "MUTATED" + + // Original should NOT be affected (deep copy works with copystructure) + assert.Equal(t, "original", nested["mutable"], + "Deep copy: original nested map should NOT be affected by mutation") +} + +func TestDeepCopyMap_EmptyMap(t *testing.T) { + log := &mockLogger{} + + original := map[string]interface{}{} + copied := deepCopyMap(original, log) + + assert.Empty(t, log.warnings) + assert.NotNil(t, copied) + assert.Empty(t, copied) +} + +func TestDeepCopyMap_NilLogger(t *testing.T) { + // Should not panic when logger is nil + original := map[string]interface{}{ + "string": "value", + "nested": map[string]interface{}{ + "key": "nested_value", + }, + } + + // Should not panic even with nil logger + copied := deepCopyMap(original, nil) + + assert.Equal(t, "value", copied["string"]) + + // Verify deep copy works + copiedNested := copied["nested"].(map[string]interface{}) + copiedNested["key"] = "modified" + + originalNested := original["nested"].(map[string]interface{}) + assert.Equal(t, "nested_value", originalNested["key"], "Original should not be modified") +} + +func TestDeepCopyMap_NilMap(t 
*testing.T) { + log := &mockLogger{} + + copied := deepCopyMap(nil, log) + + assert.Nil(t, copied) + assert.Empty(t, log.warnings) +} + +func TestDeepCopyMap_KubernetesManifest(t *testing.T) { + // Test with a realistic Kubernetes manifest structure + log := &mockLogger{} + + original := map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-config", + "namespace": "default", + "labels": map[string]interface{}{ + "app": "test", + }, + }, + "data": map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + } + + copied := deepCopyMap(original, log) + + assert.Empty(t, log.warnings) + + // Modify copied manifest + copiedMetadata := copied["metadata"].(map[string]interface{}) + copiedLabels := copiedMetadata["labels"].(map[string]interface{}) + copiedLabels["app"] = "modified" + + // Verify original is NOT modified + originalMetadata := original["metadata"].(map[string]interface{}) + originalLabels := originalMetadata["labels"].(map[string]interface{}) + assert.Equal(t, "test", originalLabels["app"], "Original manifest should not be modified") +} + +// TestDeepCopyMap_Context ensures the function is used correctly in context +func TestDeepCopyMap_RealWorldContext(t *testing.T) { + // This simulates how deepCopyMap is used in executeResource + log := logger.NewLogger(context.Background()) + + manifest := map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": "{{ .namespace }}", + }, + } + + // Deep copy before template rendering + copied := deepCopyMap(manifest, log) + + // Simulate template rendering modifying the copy + copiedMetadata := copied["metadata"].(map[string]interface{}) + copiedMetadata["name"] = "rendered-namespace" + + // Original template should remain unchanged for next iteration + originalMetadata := manifest["metadata"].(map[string]interface{}) + assert.Equal(t, "{{ .namespace }}", originalMetadata["name"]) +} 
+ diff --git a/internal/executor/types.go b/internal/executor/types.go new file mode 100644 index 0000000..02e1cd8 --- /dev/null +++ b/internal/executor/types.go @@ -0,0 +1,366 @@ +package executor + +import ( + "context" + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +// ExecutionPhase represents which phase of execution +type ExecutionPhase string + +const ( + // PhaseParamExtraction is the parameter extraction phase + PhaseParamExtraction ExecutionPhase = "param_extraction" + // PhasePreconditions is the precondition evaluation phase + PhasePreconditions ExecutionPhase = "preconditions" + // PhaseResources is the resource creation/update phase + PhaseResources ExecutionPhase = "resources" + // PhasePostActions is the post-action execution phase + PhasePostActions ExecutionPhase = "post_actions" +) + +// Kubernetes annotation keys +const ( + // AnnotationGeneration is the annotation key for tracking resource generation + AnnotationGeneration = "hyperfleet.io/generation" +) + +// ExecutionStatus represents the status of execution (runtime perspective) +type ExecutionStatus string + +const ( + // StatusSuccess indicates successful execution (adapter ran successfully) + StatusSuccess ExecutionStatus = "success" + // StatusFailed indicates failed execution (process execution error: API timeout, parse error, K8s error, etc.) 
+ StatusFailed ExecutionStatus = "failed" +) + +// ExecutorConfig holds configuration for the executor +type ExecutorConfig struct { + // AdapterConfig is the loaded adapter configuration + AdapterConfig *config_loader.AdapterConfig + // APIClient is the HyperFleet API client + APIClient hyperfleet_api.Client + // K8sClient is the Kubernetes client (optional, can be nil if not needed) + // Use k8s_client.K8sClient interface for easy mocking in tests + K8sClient k8s_client.K8sClient + // Logger is the logger instance + Logger logger.Logger +} + +// Executor processes CloudEvents according to the adapter configuration +type Executor struct { + config *ExecutorConfig + precondExecutor *PreconditionExecutor + resourceExecutor *ResourceExecutor + postActionExecutor *PostActionExecutor +} + +// ExecutionResult contains the result of processing an event +type ExecutionResult struct { + // EventID is the ID of the processed event + EventID string + // Status is the overall execution status (runtime perspective) + Status ExecutionStatus + // Phase is the phase where execution ended + Phase ExecutionPhase + // Params contains the extracted parameters + Params map[string]interface{} + // PreconditionResults contains results of precondition evaluations + PreconditionResults []PreconditionResult + // ResourceResults contains results of resource operations + ResourceResults []ResourceResult + // PostActionResults contains results of post-action executions + PostActionResults []PostActionResult + // Error is the error if Status is StatusFailed (process execution error only) + Error error + // ErrorReason is a human-readable error reason (process execution error only) + ErrorReason string + // ResourcesSkipped indicates if resources were skipped (business outcome) + ResourcesSkipped bool + // SkipReason is why resources were skipped (e.g., "precondition not met") + SkipReason string + // ExecutionContext contains the full execution context (for testing and debugging) + 
ExecutionContext *ExecutionContext +} + +// PreconditionResult contains the result of a single precondition evaluation +type PreconditionResult struct { + // Name is the precondition name + Name string + // Status is the result status + Status ExecutionStatus + // Matched indicates if conditions were satisfied + Matched bool + // APICallMade indicates if an API call was made + APICallMade bool + // APIResponse contains the raw API response (if APICallMade) + APIResponse []byte + // CapturedFields contains fields captured from the API response + CapturedFields map[string]interface{} + // ConditionResults contains individual condition evaluation results + ConditionResults []criteria.EvaluationResult + // CELResult contains CEL evaluation result (if expression was used) + CELResult *criteria.CELResult + // Error is the error if Status is StatusFailed + Error error +} + +// ResourceResult contains the result of a single resource operation +type ResourceResult struct { + // Name is the resource name from config + Name string + // Kind is the Kubernetes resource kind + Kind string + // Namespace is the resource namespace + Namespace string + // ResourceName is the actual K8s resource name + ResourceName string + // Status is the result status + Status ExecutionStatus + // Operation is the operation performed (create, update, skip) + Operation ResourceOperation + // Resource is the created/updated resource (if successful) + Resource *unstructured.Unstructured + // Error is the error if Status is StatusFailed + Error error +} + +// ResourceOperation represents the operation performed on a resource +type ResourceOperation string + +const ( + // OperationCreate indicates a resource was created + OperationCreate ResourceOperation = "create" + // OperationUpdate indicates a resource was updated + OperationUpdate ResourceOperation = "update" + // OperationRecreate indicates a resource was deleted and recreated + OperationRecreate ResourceOperation = "recreate" + // 
OperationSkip indicates no operation was needed + OperationSkip ResourceOperation = "skip" +) + +// PostActionResult contains the result of a single post-action execution +type PostActionResult struct { + // Name is the post-action name + Name string + // Status is the result status + Status ExecutionStatus + // Skipped indicates if the action was skipped due to when condition + Skipped bool + // SkipReason is the reason for skipping + SkipReason string + // APICallMade indicates if an API call was made + APICallMade bool + // APIResponse contains the raw API response (if APICallMade) + APIResponse []byte + // HTTPStatus is the HTTP status code of the API response + HTTPStatus int + // Error is the error if Status is StatusFailed + Error error +} + +// ExecutionContext holds runtime context during execution +type ExecutionContext struct { + // Ctx is the Go context + Ctx context.Context + // Event is the CloudEvent being processed (for metadata only) + Event *event.Event + // EventData is the parsed CloudEvent data payload + EventData map[string]interface{} + // Params holds extracted parameters and captured fields + // - Populated during param extraction phase with event/env data + // - Populated during precondition phase with captured API response fields + Params map[string]interface{} + // Resources holds created/updated K8s resources keyed by resource name + Resources map[string]*unstructured.Unstructured + // Adapter holds adapter execution metadata + Adapter AdapterMetadata + // Evaluations tracks all condition evaluations for debugging/auditing + Evaluations []EvaluationRecord +} + +// EvaluationRecord tracks a single condition evaluation during execution +type EvaluationRecord struct { + // Phase is the execution phase where this evaluation occurred + Phase ExecutionPhase + // Name is the name of the precondition/resource/action being evaluated + Name string + // EvaluationType indicates what kind of evaluation was performed + EvaluationType EvaluationType 
+ // Expression is the CEL expression or condition description + Expression string + // Matched indicates whether the evaluation succeeded + Matched bool + // FieldResults contains individual field evaluation results keyed by field path (for structured conditions) + // Reuses criteria.EvaluationResult to avoid duplication + FieldResults map[string]criteria.EvaluationResult + // Timestamp is when the evaluation occurred + Timestamp time.Time +} + +// EvaluationType indicates the type of evaluation performed +type EvaluationType string + +const ( + // EvaluationTypeCEL indicates a CEL expression evaluation + EvaluationTypeCEL EvaluationType = "cel" + // EvaluationTypeConditions indicates structured conditions evaluation + EvaluationTypeConditions EvaluationType = "conditions" +) + +// AdapterMetadata holds adapter execution metadata for CEL expressions +type AdapterMetadata struct { + // ExecutionStatus is the overall execution status (runtime perspective: "success", "failed") + ExecutionStatus string + // ErrorReason is the error reason if failed (process execution errors only) + ErrorReason string + // ErrorMessage is the error message if failed (process execution errors only) + ErrorMessage string + // ExecutionError contains detailed error information if execution failed + ExecutionError *ExecutionError `json:"executionError,omitempty"` + // ResourcesSkipped indicates if resources were skipped (business outcome) + ResourcesSkipped bool `json:"resourcesSkipped,omitempty"` + // SkipReason is why resources were skipped (e.g., "precondition not met") + SkipReason string `json:"skipReason,omitempty"` +} + +// ExecutionError represents a structured execution error +type ExecutionError struct { + // Phase is the execution phase where the error occurred + Phase string `json:"phase"` + // Step is the specific step (precondition/resource/action name) that failed + Step string `json:"step"` + // Message is the error message (includes all relevant details) + Message string 
`json:"message"` +} + +// NewExecutionContext creates a new execution context +func NewExecutionContext(ctx context.Context, evt *event.Event, eventData map[string]interface{}) *ExecutionContext { + return &ExecutionContext{ + Ctx: ctx, + Event: evt, + EventData: eventData, + Params: make(map[string]interface{}), + Resources: make(map[string]*unstructured.Unstructured), + Evaluations: make([]EvaluationRecord, 0), + Adapter: AdapterMetadata{ + ExecutionStatus: string(StatusSuccess), + }, + } +} + +// AddEvaluation records a condition evaluation result +func (ec *ExecutionContext) AddEvaluation(phase ExecutionPhase, name string, evalType EvaluationType, expression string, matched bool, fieldResults map[string]criteria.EvaluationResult) { + ec.Evaluations = append(ec.Evaluations, EvaluationRecord{ + Phase: phase, + Name: name, + EvaluationType: evalType, + Expression: expression, + Matched: matched, + FieldResults: fieldResults, + Timestamp: time.Now(), + }) +} + +// AddCELEvaluation is a convenience method for recording CEL expression evaluations +func (ec *ExecutionContext) AddCELEvaluation(phase ExecutionPhase, name, expression string, matched bool) { + ec.AddEvaluation(phase, name, EvaluationTypeCEL, expression, matched, nil) +} + +// AddConditionsEvaluation is a convenience method for recording structured conditions evaluations +func (ec *ExecutionContext) AddConditionsEvaluation(phase ExecutionPhase, name string, matched bool, fieldResults map[string]criteria.EvaluationResult) { + ec.AddEvaluation(phase, name, EvaluationTypeConditions, "", matched, fieldResults) +} + +// GetEvaluationsByPhase returns all evaluations for a specific phase +func (ec *ExecutionContext) GetEvaluationsByPhase(phase ExecutionPhase) []EvaluationRecord { + var results []EvaluationRecord + for _, eval := range ec.Evaluations { + if eval.Phase == phase { + results = append(results, eval) + } + } + return results +} + +// GetFailedEvaluations returns all evaluations that did not match +func 
(ec *ExecutionContext) GetFailedEvaluations() []EvaluationRecord { + var results []EvaluationRecord + for _, eval := range ec.Evaluations { + if !eval.Matched { + results = append(results, eval) + } + } + return results +} + +// SetError sets the error status in adapter metadata (for runtime failures) +func (ec *ExecutionContext) SetError(reason, message string) { + ec.Adapter.ExecutionStatus = string(StatusFailed) + ec.Adapter.ErrorReason = reason + ec.Adapter.ErrorMessage = message +} + +// SetSkipped sets the status to indicate execution was skipped (not an error) +func (ec *ExecutionContext) SetSkipped(reason, message string) { + // Execution was successful, but resources were skipped due to business logic + ec.Adapter.ExecutionStatus = string(StatusSuccess) + ec.Adapter.ResourcesSkipped = true + ec.Adapter.SkipReason = reason + if message != "" { + ec.Adapter.SkipReason = message // Use message if provided for more detail + } +} + +// ExecutorError represents an error during execution +type ExecutorError struct { + Phase ExecutionPhase + Step string + Message string + Err error +} + +func (e *ExecutorError) Error() string { + if e.Err != nil { + return fmt.Sprintf("[%s] %s: %s: %v", e.Phase, e.Step, e.Message, e.Err) + } + return fmt.Sprintf("[%s] %s: %s", e.Phase, e.Step, e.Message) +} + +func (e *ExecutorError) Unwrap() error { + return e.Err +} + +// NewExecutorError creates a new executor error +func NewExecutorError(phase ExecutionPhase, step, message string, err error) *ExecutorError { + return &ExecutorError{ + Phase: phase, + Step: step, + Message: message, + Err: err, + } +} + +// PreconditionsOutcome represents the high-level result of precondition evaluation +type PreconditionsOutcome struct { + // AllMatched indicates whether all preconditions were satisfied (business outcome) + AllMatched bool + // Results contains individual precondition results + Results []PreconditionResult + // Error contains execution errors (API failures, parse errors, etc.) 
+ // nil if preconditions were evaluated successfully, even if not matched + Error error + // NotMetReason provides details when AllMatched is false + NotMetReason string +} + diff --git a/internal/executor/utils.go b/internal/executor/utils.go new file mode 100644 index 0000000..387d37d --- /dev/null +++ b/internal/executor/utils.go @@ -0,0 +1,299 @@ +package executor + +import ( + "bytes" + "context" + "fmt" + "net/http" + "strings" + "text/template" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + apierrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" +) + +// ToConditionDefs converts config_loader.Condition slice to criteria.ConditionDef slice. +// This centralizes the conversion logic that was previously repeated in multiple places. 
+func ToConditionDefs(conditions []config_loader.Condition) []criteria.ConditionDef { + defs := make([]criteria.ConditionDef, len(conditions)) + for i, cond := range conditions { + defs[i] = criteria.ConditionDef{ + Field: cond.Field, + Operator: criteria.Operator(cond.Operator), + Value: cond.Value, + } + } + return defs +} + +// ExecuteLogAction executes a log action with the given context +// The message is rendered as a Go template with access to all params +// This is a shared utility function used by both PreconditionExecutor and PostActionExecutor +func ExecuteLogAction(logAction *config_loader.LogAction, execCtx *ExecutionContext, log logger.Logger) { + if logAction == nil || logAction.Message == "" { + return + } + + // Render the message template + message, err := renderTemplate(logAction.Message, execCtx.Params) + if err != nil { + log.Error(fmt.Sprintf("failed to render log message: %v", err)) + return + } + + // Log at the specified level (default: info) + level := strings.ToLower(logAction.Level) + if level == "" { + level = "info" + } + + switch level { + case "debug": + log.V(2).Infof("[config] %s", message) + case "info": + log.Infof("[config] %s", message) + case "warning", "warn": + log.Warning(fmt.Sprintf("[config] %s", message)) + case "error": + log.Error(fmt.Sprintf("[config] %s", message)) + default: + log.Infof("[config] %s", message) + } + +} + +// ExecuteAPICall executes an API call with the given configuration and returns the response and rendered URL +// This is a shared utility function used by both PreconditionExecutor and PostActionExecutor +// On error, it returns an APIError with full context (method, URL, status, body, attempts, duration) +// Returns: response, renderedURL, error +func ExecuteAPICall(ctx context.Context, apiCall *config_loader.APICall, execCtx *ExecutionContext, apiClient hyperfleet_api.Client, log logger.Logger) (*hyperfleet_api.Response, string, error) { + if apiCall == nil { + return nil, "", 
fmt.Errorf("apiCall is nil") + } + + // Render URL template + url, err := renderTemplate(apiCall.URL, execCtx.Params) + if err != nil { + return nil, "", fmt.Errorf("failed to render URL template: %w", err) + } + + log.Infof("Making API call: %s %s", apiCall.Method, url) + + // Build request options + opts := make([]hyperfleet_api.RequestOption, 0) + + // Add headers + headers := make(map[string]string) + for _, h := range apiCall.Headers { + headerValue, err := renderTemplate(h.Value, execCtx.Params) + if err != nil { + return nil, url, fmt.Errorf("failed to render header '%s' template: %w", h.Name, err) + } + headers[h.Name] = headerValue + } + if len(headers) > 0 { + opts = append(opts, hyperfleet_api.WithHeaders(headers)) + } + + // Set timeout if specified + if apiCall.Timeout != "" { + timeout, err := time.ParseDuration(apiCall.Timeout) + if err == nil { + opts = append(opts, hyperfleet_api.WithRequestTimeout(timeout)) + } else { + log.Warning(fmt.Sprintf("failed to parse timeout '%s': %v, using default timeout", apiCall.Timeout, err)) + } + } + + // Set retry configuration + if apiCall.RetryAttempts > 0 { + opts = append(opts, hyperfleet_api.WithRequestRetryAttempts(apiCall.RetryAttempts)) + } + if apiCall.RetryBackoff != "" { + backoff := hyperfleet_api.BackoffStrategy(apiCall.RetryBackoff) + opts = append(opts, hyperfleet_api.WithRequestRetryBackoff(backoff)) + } + + // Execute request based on method + var resp *hyperfleet_api.Response + switch strings.ToUpper(apiCall.Method) { + case http.MethodGet: + resp, err = apiClient.Get(ctx, url, opts...) + case http.MethodPost: + body := []byte(apiCall.Body) + if apiCall.Body != "" { + body, err = renderTemplateBytes(apiCall.Body, execCtx.Params) + if err != nil { + return nil, url, fmt.Errorf("failed to render body template: %w", err) + } + } + resp, err = apiClient.Post(ctx, url, body, opts...) 
+ case http.MethodPut: + body := []byte(apiCall.Body) + if apiCall.Body != "" { + body, err = renderTemplateBytes(apiCall.Body, execCtx.Params) + if err != nil { + return nil, "", fmt.Errorf("failed to render body template: %w", err) + } + } + resp, err = apiClient.Put(ctx, url, body, opts...) + case http.MethodPatch: + body := []byte(apiCall.Body) + if apiCall.Body != "" { + body, err = renderTemplateBytes(apiCall.Body, execCtx.Params) + if err != nil { + return nil, "", fmt.Errorf("failed to render body template: %w", err) + } + } + resp, err = apiClient.Patch(ctx, url, body, opts...) + case http.MethodDelete: + resp, err = apiClient.Delete(ctx, url, opts...) + default: + return nil, url, fmt.Errorf("unsupported HTTP method: %s", apiCall.Method) + } + + if err != nil { + // Return response AND error - response may contain useful details even on error + // (e.g., HTTP status code, response body) + if resp != nil { + log.Warning(fmt.Sprintf("API call failed: %d %s, error: %v", resp.StatusCode, resp.Status, err)) + // Wrap as APIError with full context + apiErr := apierrors.NewAPIError( + apiCall.Method, + url, + resp.StatusCode, + resp.Status, + resp.Body, + resp.Attempts, + resp.Duration, + err, + ) + return resp, url, apiErr + } else { + log.Warning(fmt.Sprintf("API call failed: %v", err)) + // No response - create APIError with minimal context + apiErr := apierrors.NewAPIError( + apiCall.Method, + url, + 0, + "", + nil, + 0, + 0, + err, + ) + return resp, url, apiErr + } + } + if resp == nil { + nilErr := fmt.Errorf("API client returned nil response without error") + return nil, url, apierrors.NewAPIError(apiCall.Method, url, 0, "", nil, 0, 0, nilErr) + } + + log.Infof("API call completed: %d %s", resp.StatusCode, resp.Status) + return resp, url, nil +} + +// ValidateAPIResponse checks if an API response is valid and successful +// Returns an APIError with full context if response is nil or unsuccessful +// method and url are used to construct APIError with 
proper context +func ValidateAPIResponse(resp *hyperfleet_api.Response, err error, method, url string) error { + if err != nil { + // If it's already an APIError, return it as-is + if _, ok := apierrors.IsAPIError(err); ok { + return err + } + // Otherwise wrap it as APIError + return apierrors.NewAPIError(method, url, 0, "", nil, 0, 0, err) + } + + if resp == nil { + nilErr := fmt.Errorf("API response is nil") + return apierrors.NewAPIError(method, url, 0, "", nil, 0, 0, nilErr) + } + + if !resp.IsSuccess() { + errMsg := fmt.Sprintf("API returned non-success status: %d %s", resp.StatusCode, resp.Status) + if len(resp.Body) > 0 { + errMsg = fmt.Sprintf("%s, response body: %s", errMsg, string(resp.Body)) + } + baseErr := fmt.Errorf("%s", errMsg) + return apierrors.NewAPIError( + method, + url, + resp.StatusCode, + resp.Status, + resp.Body, + resp.Attempts, + resp.Duration, + baseErr, + ) + } + + return nil +} + +// renderTemplate renders a Go template string with the given data +// This is a shared utility used across preconditions, resources, and post-actions +func renderTemplate(templateStr string, data map[string]interface{}) (string, error) { + // If no template delimiters, return as-is + if !strings.Contains(templateStr, "{{") { + return templateStr, nil + } + + tmpl, err := template.New("template").Option("missingkey=error").Parse(templateStr) + if err != nil { + return "", fmt.Errorf("failed to parse template: %w", err) + } + + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return "", fmt.Errorf("failed to execute template: %w", err) + } + + return buf.String(), nil +} + +// renderTemplateBytes renders a Go template string and returns bytes +func renderTemplateBytes(templateStr string, data map[string]interface{}) ([]byte, error) { + result, err := renderTemplate(templateStr, data) + if err != nil { + return nil, err + } + return []byte(result), nil +} + +// executionErrorToMap converts an ExecutionError struct to a map for CEL 
evaluation +// Returns nil if the ExecutionError pointer is nil +func executionErrorToMap(execErr *ExecutionError) interface{} { + if execErr == nil { + return nil + } + + return map[string]interface{}{ + "phase": execErr.Phase, + "step": execErr.Step, + "message": execErr.Message, + } +} + +// adapterMetadataToMap converts AdapterMetadata struct to a map for CEL evaluation +func adapterMetadataToMap(adapter *AdapterMetadata) map[string]interface{} { + if adapter == nil { + return map[string]interface{}{} + } + + return map[string]interface{}{ + "executionStatus": adapter.ExecutionStatus, + "resourcesSkipped": adapter.ResourcesSkipped, + "skipReason": adapter.SkipReason, + "errorReason": adapter.ErrorReason, + "errorMessage": adapter.ErrorMessage, + "executionError": executionErrorToMap(adapter.ExecutionError), + } +} + diff --git a/internal/executor/utils_test.go b/internal/executor/utils_test.go new file mode 100644 index 0000000..176ae1c --- /dev/null +++ b/internal/executor/utils_test.go @@ -0,0 +1,1093 @@ +package executor + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/criteria" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + apierrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +) + +func TestValidateAPIResponse_NilError_SuccessResponse(t *testing.T) { + resp := &hyperfleet_api.Response{ + StatusCode: 200, + Status: "200 OK", + Body: []byte(`{"status":"ok"}`), + Attempts: 1, + Duration: 100 * time.Millisecond, + } + + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com/api") + + assert.NoError(t, err) +} + +func TestValidateAPIResponse_NilError_NilResponse(t *testing.T) { + err := ValidateAPIResponse(nil, nil, "GET", 
"http://example.com/api") + + require.Error(t, err) + + // Should be wrapped as APIError + apiErr, ok := apierrors.IsAPIError(err) + require.True(t, ok, "Expected error to be APIError") + assert.Equal(t, "GET", apiErr.Method) + assert.Equal(t, "http://example.com/api", apiErr.URL) + assert.Equal(t, 0, apiErr.StatusCode) + assert.Contains(t, apiErr.Error(), "nil") +} + +func TestValidateAPIResponse_WithError_AlreadyAPIError(t *testing.T) { + // If error is already an APIError, it should be returned as-is + originalErr := apierrors.NewAPIError( + "POST", + "http://example.com/api/create", + 503, + "503 Service Unavailable", + []byte("service down"), + 3, + 5*time.Second, + errors.New("connection refused"), + ) + + err := ValidateAPIResponse(nil, originalErr, "GET", "http://other.com") + + require.Error(t, err) + + // Should be the same error, not re-wrapped + apiErr, ok := apierrors.IsAPIError(err) + require.True(t, ok) + assert.Equal(t, "POST", apiErr.Method) // Original method preserved + assert.Equal(t, "http://example.com/api/create", apiErr.URL) + assert.Equal(t, 503, apiErr.StatusCode) +} + +func TestValidateAPIResponse_WithError_NonAPIError(t *testing.T) { + // Non-APIError should be wrapped + originalErr := errors.New("network timeout") + + err := ValidateAPIResponse(nil, originalErr, "PUT", "http://example.com/api/update") + + require.Error(t, err) + + apiErr, ok := apierrors.IsAPIError(err) + require.True(t, ok, "Expected error to be wrapped as APIError") + assert.Equal(t, "PUT", apiErr.Method) + assert.Equal(t, "http://example.com/api/update", apiErr.URL) + assert.Equal(t, 0, apiErr.StatusCode) // No status code for network errors + assert.True(t, errors.Is(err, originalErr), "Original error should be unwrappable") +} + +func TestValidateAPIResponse_NonSuccessStatusCodes(t *testing.T) { + tests := []struct { + name string + statusCode int + status string + body []byte + expectError bool + expectBody bool + }{ + { + name: "400 Bad Request", + statusCode: 
400, + status: "400 Bad Request", + body: []byte(`{"error":"invalid input"}`), + expectError: true, + expectBody: true, + }, + { + name: "401 Unauthorized", + statusCode: 401, + status: "401 Unauthorized", + body: []byte(`{"error":"invalid token"}`), + expectError: true, + expectBody: true, + }, + { + name: "403 Forbidden", + statusCode: 403, + status: "403 Forbidden", + body: nil, + expectError: true, + expectBody: false, + }, + { + name: "404 Not Found", + statusCode: 404, + status: "404 Not Found", + body: []byte(`{"message":"resource not found"}`), + expectError: true, + expectBody: true, + }, + { + name: "429 Too Many Requests", + statusCode: 429, + status: "429 Too Many Requests", + body: []byte(`{"retry_after":60}`), + expectError: true, + expectBody: true, + }, + { + name: "500 Internal Server Error", + statusCode: 500, + status: "500 Internal Server Error", + body: []byte(`{"error":"internal error"}`), + expectError: true, + expectBody: true, + }, + { + name: "502 Bad Gateway", + statusCode: 502, + status: "502 Bad Gateway", + body: nil, + expectError: true, + expectBody: false, + }, + { + name: "503 Service Unavailable", + statusCode: 503, + status: "503 Service Unavailable", + body: []byte("service temporarily unavailable"), + expectError: true, + expectBody: true, + }, + { + name: "504 Gateway Timeout", + statusCode: 504, + status: "504 Gateway Timeout", + body: nil, + expectError: true, + expectBody: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resp := &hyperfleet_api.Response{ + StatusCode: tt.statusCode, + Status: tt.status, + Body: tt.body, + Attempts: 1, + Duration: 50 * time.Millisecond, + } + + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com/api") + + if tt.expectError { + require.Error(t, err) + + apiErr, ok := apierrors.IsAPIError(err) + require.True(t, ok, "Expected error to be APIError") + + assert.Equal(t, tt.statusCode, apiErr.StatusCode) + assert.Equal(t, tt.status, apiErr.Status) 
+ assert.Equal(t, "GET", apiErr.Method) + assert.Equal(t, "http://example.com/api", apiErr.URL) + + if tt.expectBody { + assert.Equal(t, tt.body, apiErr.ResponseBody) + assert.Contains(t, apiErr.Error(), string(tt.body)) + } + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestValidateAPIResponse_SuccessStatusCodes(t *testing.T) { + tests := []struct { + name string + statusCode int + status string + }{ + { + name: "200 OK", + statusCode: 200, + status: "200 OK", + }, + { + name: "201 Created", + statusCode: 201, + status: "201 Created", + }, + { + name: "202 Accepted", + statusCode: 202, + status: "202 Accepted", + }, + { + name: "204 No Content", + statusCode: 204, + status: "204 No Content", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resp := &hyperfleet_api.Response{ + StatusCode: tt.statusCode, + Status: tt.status, + Body: nil, + Attempts: 1, + Duration: 30 * time.Millisecond, + } + + err := ValidateAPIResponse(resp, nil, "POST", "http://example.com/api/create") + + assert.NoError(t, err) + }) + } +} + +func TestValidateAPIResponse_PreservesAttempts(t *testing.T) { + resp := &hyperfleet_api.Response{ + StatusCode: 500, + Status: "500 Internal Server Error", + Body: []byte("error"), + Attempts: 5, + Duration: 10 * time.Second, + } + + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + require.Error(t, err) + apiErr, ok := apierrors.IsAPIError(err) + require.True(t, ok) + + assert.Equal(t, 5, apiErr.Attempts) + assert.Equal(t, 10*time.Second, apiErr.Duration) +} + +func TestValidateAPIResponse_AllHTTPMethods(t *testing.T) { + methods := []string{"GET", "POST", "PUT", "PATCH", "DELETE", "HEAD", "OPTIONS"} + + for _, method := range methods { + t.Run(method, func(t *testing.T) { + resp := &hyperfleet_api.Response{ + StatusCode: 404, + Status: "404 Not Found", + } + + err := ValidateAPIResponse(resp, nil, method, "http://example.com") + + require.Error(t, err) + apiErr, ok := 
apierrors.IsAPIError(err) + require.True(t, ok) + assert.Equal(t, method, apiErr.Method) + }) + } +} + +func TestValidateAPIResponse_URLPreserved(t *testing.T) { + urls := []string{ + "http://localhost:8080/api/v1/clusters", + "https://api.example.com/resources/123", + "http://service.namespace.svc.cluster.local:9090/health", + "http://10.0.0.1:3000/path/to/resource?query=value", + } + + for _, url := range urls { + t.Run(url, func(t *testing.T) { + resp := &hyperfleet_api.Response{ + StatusCode: 500, + Status: "500 Internal Server Error", + } + + err := ValidateAPIResponse(resp, nil, "GET", url) + + require.Error(t, err) + apiErr, ok := apierrors.IsAPIError(err) + require.True(t, ok) + assert.Equal(t, url, apiErr.URL) + assert.Contains(t, apiErr.Error(), url) + }) + } +} + +func TestValidateAPIResponse_WrappedErrorChain(t *testing.T) { + // Test that error wrapping works correctly for error inspection + innerErr := fmt.Errorf("connection reset by peer") + wrappedErr := fmt.Errorf("dial failed: %w", innerErr) + + err := ValidateAPIResponse(nil, wrappedErr, "GET", "http://example.com") + + require.Error(t, err) + + // Should be an APIError + apiErr, ok := apierrors.IsAPIError(err) + require.True(t, ok) + + // The underlying error chain should be preserved + assert.Contains(t, apiErr.Error(), "connection reset") +} + +func TestValidateAPIResponse_ErrorMessageContainsContext(t *testing.T) { + resp := &hyperfleet_api.Response{ + StatusCode: 503, + Status: "503 Service Unavailable", + Body: []byte(`{"message":"database connection failed","retry_after":30}`), + Attempts: 3, + Duration: 9 * time.Second, + } + + err := ValidateAPIResponse(resp, nil, "POST", "http://api.example.com/clusters") + + require.Error(t, err) + + errMsg := err.Error() + assert.Contains(t, errMsg, "POST") + assert.Contains(t, errMsg, "http://api.example.com/clusters") + assert.Contains(t, errMsg, "503") + assert.Contains(t, errMsg, "3 attempt") // attempts; bare "3" would be vacuous since "503" already contains it +} + +func 
TestValidateAPIResponse_APIErrorHelpers(t *testing.T) { + t.Run("IsServerError", func(t *testing.T) { + resp := &hyperfleet_api.Response{StatusCode: 500, Status: "500 Internal Server Error"} + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + assert.True(t, apiErr.IsServerError()) + assert.False(t, apiErr.IsClientError()) + }) + + t.Run("IsClientError", func(t *testing.T) { + resp := &hyperfleet_api.Response{StatusCode: 400, Status: "400 Bad Request"} + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + assert.True(t, apiErr.IsClientError()) + assert.False(t, apiErr.IsServerError()) + }) + + t.Run("IsNotFound", func(t *testing.T) { + resp := &hyperfleet_api.Response{StatusCode: 404, Status: "404 Not Found"} + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + assert.True(t, apiErr.IsNotFound()) + }) + + t.Run("IsUnauthorized", func(t *testing.T) { + resp := &hyperfleet_api.Response{StatusCode: 401, Status: "401 Unauthorized"} + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + assert.True(t, apiErr.IsUnauthorized()) + }) + + t.Run("IsForbidden", func(t *testing.T) { + resp := &hyperfleet_api.Response{StatusCode: 403, Status: "403 Forbidden"} + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + assert.True(t, apiErr.IsForbidden()) + }) + + t.Run("IsRateLimited", func(t *testing.T) { + resp := &hyperfleet_api.Response{StatusCode: 429, Status: "429 Too Many Requests"} + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + assert.True(t, apiErr.IsRateLimited()) + }) + + t.Run("IsBadRequest", func(t *testing.T) { + resp := &hyperfleet_api.Response{StatusCode: 400, Status: "400 Bad Request"} + err := 
ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + assert.True(t, apiErr.IsBadRequest()) + }) + + t.Run("IsConflict", func(t *testing.T) { + resp := &hyperfleet_api.Response{StatusCode: 409, Status: "409 Conflict"} + err := ValidateAPIResponse(resp, nil, "POST", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + assert.True(t, apiErr.IsConflict()) + }) +} + +func TestValidateAPIResponse_ResponseBodyString(t *testing.T) { + resp := &hyperfleet_api.Response{ + StatusCode: 500, + Status: "500 Internal Server Error", + Body: []byte(`{"error":"database timeout","code":"DB_TIMEOUT"}`), + } + + err := ValidateAPIResponse(resp, nil, "GET", "http://example.com") + + apiErr, _ := apierrors.IsAPIError(err) + + assert.True(t, apiErr.HasResponseBody()) + assert.Equal(t, `{"error":"database timeout","code":"DB_TIMEOUT"}`, apiErr.ResponseBodyString()) +} + +// TestToConditionDefs tests the conversion of config_loader conditions to criteria definitions +func TestToConditionDefs(t *testing.T) { + tests := []struct { + name string + conditions []config_loader.Condition + expected []criteria.ConditionDef + }{ + { + name: "empty conditions", + conditions: []config_loader.Condition{}, + expected: []criteria.ConditionDef{}, + }, + { + name: "single condition", + conditions: []config_loader.Condition{ + {Field: "status.phase", Operator: "equals", Value: "Running"}, + }, + expected: []criteria.ConditionDef{ + {Field: "status.phase", Operator: criteria.OperatorEquals, Value: "Running"}, + }, + }, + { + name: "multiple conditions with camelCase operators", + conditions: []config_loader.Condition{ + {Field: "status.phase", Operator: "equals", Value: "Running"}, + {Field: "replicas", Operator: "greaterThan", Value: 0}, + {Field: "metadata.labels.app", Operator: "notEquals", Value: ""}, + }, + expected: []criteria.ConditionDef{ + {Field: "status.phase", Operator: criteria.OperatorEquals, Value: "Running"}, + {Field: 
"replicas", Operator: criteria.OperatorGreaterThan, Value: 0}, + {Field: "metadata.labels.app", Operator: criteria.OperatorNotEquals, Value: ""}, + }, + }, + { + name: "all operator types", + conditions: []config_loader.Condition{ + {Field: "f1", Operator: "equals", Value: "v1"}, + {Field: "f2", Operator: "notEquals", Value: "v2"}, + {Field: "f3", Operator: "greaterThan", Value: 10}, + {Field: "f4", Operator: "lessThan", Value: 5}, + {Field: "f5", Operator: "contains", Value: "test"}, + {Field: "f6", Operator: "in", Value: []string{"a", "b"}}, + }, + expected: []criteria.ConditionDef{ + {Field: "f1", Operator: criteria.OperatorEquals, Value: "v1"}, + {Field: "f2", Operator: criteria.OperatorNotEquals, Value: "v2"}, + {Field: "f3", Operator: criteria.OperatorGreaterThan, Value: 10}, + {Field: "f4", Operator: criteria.OperatorLessThan, Value: 5}, + {Field: "f5", Operator: criteria.OperatorContains, Value: "test"}, + {Field: "f6", Operator: criteria.OperatorIn, Value: []string{"a", "b"}}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ToConditionDefs(tt.conditions) + + require.Len(t, result, len(tt.expected)) + for i, expected := range tt.expected { + assert.Equal(t, expected.Field, result[i].Field) + assert.Equal(t, expected.Operator, result[i].Operator) + assert.Equal(t, expected.Value, result[i].Value) + } + }) + } +} + +// TestRenderTemplateBytes tests template rendering to bytes +func TestRenderTemplateBytes(t *testing.T) { + tests := []struct { + name string + template string + data map[string]interface{} + expected []byte + expectError bool + }{ + { + name: "simple template", + template: "Hello {{ .name }}!", + data: map[string]interface{}{"name": "World"}, + expected: []byte("Hello World!"), + }, + { + name: "no template markers", + template: "plain text", + data: map[string]interface{}{}, + expected: []byte("plain text"), + }, + { + name: "JSON body template", + template: `{"cluster_id": "{{ .clusterId }}", 
"region": "{{ .region }}"}`, + data: map[string]interface{}{"clusterId": "cluster-123", "region": "us-east-1"}, + expected: []byte(`{"cluster_id": "cluster-123", "region": "us-east-1"}`), + }, + { + name: "missing variable error", + template: "{{ .missing }}", + data: map[string]interface{}{}, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := renderTemplateBytes(tt.template, tt.data) + + if tt.expectError { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestExecutionErrorToMap tests conversion of ExecutionError to map +func TestExecutionErrorToMap(t *testing.T) { + tests := []struct { + name string + execErr *ExecutionError + expected interface{} + }{ + { + name: "nil error", + execErr: nil, + expected: nil, + }, + { + name: "error with all fields", + execErr: &ExecutionError{ + Phase: "preconditions", + Step: "check-cluster", + Message: "Cluster not found", + }, + expected: map[string]interface{}{ + "phase": "preconditions", + "step": "check-cluster", + "message": "Cluster not found", + }, + }, + { + name: "error with empty fields", + execErr: &ExecutionError{ + Phase: "", + Step: "", + Message: "", + }, + expected: map[string]interface{}{ + "phase": "", + "step": "", + "message": "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := executionErrorToMap(tt.execErr) + + if tt.expected == nil { + assert.Nil(t, result) + return + } + + expectedMap := tt.expected.(map[string]interface{}) + resultMap := result.(map[string]interface{}) + assert.Equal(t, expectedMap["phase"], resultMap["phase"]) + assert.Equal(t, expectedMap["step"], resultMap["step"]) + assert.Equal(t, expectedMap["message"], resultMap["message"]) + }) + } +} + +// TestAdapterMetadataToMap tests conversion of AdapterMetadata to map +func TestAdapterMetadataToMap(t *testing.T) { + tests := []struct { + name string + 
adapter *AdapterMetadata + expected map[string]interface{} + }{ + { + name: "nil adapter", + adapter: nil, + expected: map[string]interface{}{}, + }, + { + name: "success status", + adapter: &AdapterMetadata{ + ExecutionStatus: "success", + ResourcesSkipped: false, + SkipReason: "", + ErrorReason: "", + ErrorMessage: "", + ExecutionError: nil, + }, + expected: map[string]interface{}{ + "executionStatus": "success", + "resourcesSkipped": false, + "skipReason": "", + "errorReason": "", + "errorMessage": "", + "executionError": nil, + }, + }, + { + name: "skipped status", + adapter: &AdapterMetadata{ + ExecutionStatus: "success", + ResourcesSkipped: true, + SkipReason: "Precondition 'check-status' not met", + ErrorReason: "", + ErrorMessage: "", + ExecutionError: nil, + }, + expected: map[string]interface{}{ + "executionStatus": "success", + "resourcesSkipped": true, + "skipReason": "Precondition 'check-status' not met", + "errorReason": "", + "errorMessage": "", + "executionError": nil, + }, + }, + { + name: "failed status with error", + adapter: &AdapterMetadata{ + ExecutionStatus: "failed", + ResourcesSkipped: false, + SkipReason: "", + ErrorReason: "APIError", + ErrorMessage: "API returned 500", + ExecutionError: &ExecutionError{ + Phase: "preconditions", + Step: "fetch-cluster", + Message: "Connection refused", + }, + }, + expected: map[string]interface{}{ + "executionStatus": "failed", + "resourcesSkipped": false, + "skipReason": "", + "errorReason": "APIError", + "errorMessage": "API returned 500", + "executionError": map[string]interface{}{ + "phase": "preconditions", + "step": "fetch-cluster", + "message": "Connection refused", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := adapterMetadataToMap(tt.adapter) + + assert.Equal(t, tt.expected["executionStatus"], result["executionStatus"]) + assert.Equal(t, tt.expected["resourcesSkipped"], result["resourcesSkipped"]) + assert.Equal(t, tt.expected["skipReason"], 
result["skipReason"]) + assert.Equal(t, tt.expected["errorReason"], result["errorReason"]) + assert.Equal(t, tt.expected["errorMessage"], result["errorMessage"]) + + if tt.expected["executionError"] == nil { + assert.Nil(t, result["executionError"]) + } else { + expectedErr := tt.expected["executionError"].(map[string]interface{}) + resultErr := result["executionError"].(map[string]interface{}) + assert.Equal(t, expectedErr["phase"], resultErr["phase"]) + assert.Equal(t, expectedErr["step"], resultErr["step"]) + assert.Equal(t, expectedErr["message"], resultErr["message"]) + } + }) + } +} + +// TestExecuteLogAction tests log action execution +func TestExecuteLogAction(t *testing.T) { + tests := []struct { + name string + logAction *config_loader.LogAction + params map[string]interface{} + expectCall bool + }{ + { + name: "nil log action", + logAction: nil, + params: map[string]interface{}{}, + expectCall: false, + }, + { + name: "empty message", + logAction: &config_loader.LogAction{Message: ""}, + params: map[string]interface{}{}, + expectCall: false, + }, + { + name: "simple message", + logAction: &config_loader.LogAction{Message: "Hello World", Level: "info"}, + params: map[string]interface{}{}, + expectCall: true, + }, + { + name: "template message", + logAction: &config_loader.LogAction{Message: "Processing cluster {{ .clusterId }}", Level: "info"}, + params: map[string]interface{}{"clusterId": "cluster-123"}, + expectCall: true, + }, + { + name: "debug level", + logAction: &config_loader.LogAction{Message: "Debug info", Level: "debug"}, + params: map[string]interface{}{}, + expectCall: true, + }, + { + name: "warning level", + logAction: &config_loader.LogAction{Message: "Warning message", Level: "warning"}, + params: map[string]interface{}{}, + expectCall: true, + }, + { + name: "error level", + logAction: &config_loader.LogAction{Message: "Error occurred", Level: "error"}, + params: map[string]interface{}{}, + expectCall: true, + }, + { + name: "default 
level (empty)", + logAction: &config_loader.LogAction{Message: "Default level", Level: ""}, + params: map[string]interface{}{}, + expectCall: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + log := &mockLogger{} + execCtx := &ExecutionContext{Params: tt.params} + + // This should not panic + ExecuteLogAction(tt.logAction, execCtx, log) + + // We don't verify the exact log output, just that it doesn't error + }) + } +} + +// TestConvertToStringKeyMap tests map key conversion +func TestConvertToStringKeyMap(t *testing.T) { + tests := []struct { + name string + input map[interface{}]interface{} + expected map[string]interface{} + }{ + { + name: "empty map", + input: map[interface{}]interface{}{}, + expected: map[string]interface{}{}, + }, + { + name: "simple string keys", + input: map[interface{}]interface{}{ + "key1": "value1", + "key2": "value2", + }, + expected: map[string]interface{}{ + "key1": "value1", + "key2": "value2", + }, + }, + { + name: "integer keys", + input: map[interface{}]interface{}{ + 1: "one", + 2: "two", + }, + expected: map[string]interface{}{ + "1": "one", + "2": "two", + }, + }, + { + name: "nested map", + input: map[interface{}]interface{}{ + "outer": map[interface{}]interface{}{ + "inner": "value", + }, + }, + expected: map[string]interface{}{ + "outer": map[string]interface{}{ + "inner": "value", + }, + }, + }, + { + name: "nested slice", + input: map[interface{}]interface{}{ + "items": []interface{}{"a", "b", "c"}, + }, + expected: map[string]interface{}{ + "items": []interface{}{"a", "b", "c"}, + }, + }, + { + name: "deeply nested structure", + input: map[interface{}]interface{}{ + "level1": map[interface{}]interface{}{ + "level2": map[interface{}]interface{}{ + "level3": "deep value", + }, + }, + }, + expected: map[string]interface{}{ + "level1": map[string]interface{}{ + "level2": map[string]interface{}{ + "level3": "deep value", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, 
func(t *testing.T) { + result := convertToStringKeyMap(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestConvertSlice tests slice element conversion +func TestConvertSlice(t *testing.T) { + tests := []struct { + name string + input []interface{} + expected []interface{} + }{ + { + name: "empty slice", + input: []interface{}{}, + expected: []interface{}{}, + }, + { + name: "simple values", + input: []interface{}{"a", "b", "c"}, + expected: []interface{}{"a", "b", "c"}, + }, + { + name: "numeric values", + input: []interface{}{1, 2, 3}, + expected: []interface{}{1, 2, 3}, + }, + { + name: "nested maps in slice", + input: []interface{}{ + map[interface{}]interface{}{"key": "value1"}, + map[interface{}]interface{}{"key": "value2"}, + }, + expected: []interface{}{ + map[string]interface{}{"key": "value1"}, + map[string]interface{}{"key": "value2"}, + }, + }, + { + name: "nested slices", + input: []interface{}{ + []interface{}{"a", "b"}, + []interface{}{"c", "d"}, + }, + expected: []interface{}{ + []interface{}{"a", "b"}, + []interface{}{"c", "d"}, + }, + }, + { + name: "mixed types", + input: []interface{}{ + "string", + 123, + map[interface{}]interface{}{"nested": "map"}, + []interface{}{"nested", "slice"}, + }, + expected: []interface{}{ + "string", + 123, + map[string]interface{}{"nested": "map"}, + []interface{}{"nested", "slice"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := convertSlice(tt.input) + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestBuildResourcesMap tests building resources map for CEL +func TestBuildResourcesMap(t *testing.T) { + tests := []struct { + name string + resources map[string]*unstructured.Unstructured + expected map[string]interface{} + }{ + { + name: "nil resources", + resources: nil, + expected: map[string]interface{}{}, + }, + { + name: "empty resources", + resources: map[string]*unstructured.Unstructured{}, + expected: map[string]interface{}{}, + }, + { 
+ name: "single resource", + resources: map[string]*unstructured.Unstructured{ + "cluster": { + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-cluster", + }, + }, + }, + }, + expected: map[string]interface{}{ + "cluster": map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "test-cluster", + }, + }, + }, + }, + { + name: "multiple resources", + resources: map[string]*unstructured.Unstructured{ + "configmap": { + Object: map[string]interface{}{ + "kind": "ConfigMap", + "data": map[string]interface{}{"key": "value"}, + }, + }, + "secret": { + Object: map[string]interface{}{ + "kind": "Secret", + "data": map[string]interface{}{"password": "encoded"}, + }, + }, + }, + expected: map[string]interface{}{ + "configmap": map[string]interface{}{ + "kind": "ConfigMap", + "data": map[string]interface{}{"key": "value"}, + }, + "secret": map[string]interface{}{ + "kind": "Secret", + "data": map[string]interface{}{"password": "encoded"}, + }, + }, + }, + { + name: "nil resource in map", + resources: map[string]*unstructured.Unstructured{ + "valid": { + Object: map[string]interface{}{"kind": "ConfigMap"}, + }, + "nil_resource": nil, + }, + expected: map[string]interface{}{ + "valid": map[string]interface{}{"kind": "ConfigMap"}, + // nil_resource is not included + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := BuildResourcesMap(tt.resources) + assert.Equal(t, tt.expected, result) + }) + } +} + +// TestGetResourceAsMap tests resource to map conversion +func TestGetResourceAsMap(t *testing.T) { + tests := []struct { + name string + resource *unstructured.Unstructured + expected map[string]interface{} + }{ + { + name: "nil resource", + resource: nil, + expected: nil, + }, + { + name: "simple resource", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": 
"v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "name": "test-pod", + "namespace": "default", + }, + }, + }, + expected: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "name": "test-pod", + "namespace": "default", + }, + }, + }, + { + name: "resource with status", + resource: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "Deployment", + "status": map[string]interface{}{ + "replicas": 3, + "availableReplicas": 3, + "conditions": []interface{}{ + map[string]interface{}{"type": "Available", "status": "True"}, + }, + }, + }, + }, + expected: map[string]interface{}{ + "kind": "Deployment", + "status": map[string]interface{}{ + "replicas": 3, + "availableReplicas": 3, + "conditions": []interface{}{ + map[string]interface{}{"type": "Available", "status": "True"}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GetResourceAsMap(tt.resource) + assert.Equal(t, tt.expected, result) + }) + } +} + diff --git a/internal/hyperfleet_api/client_test.go b/internal/hyperfleet_api/client_test.go index dfd97e4..70f4dfa 100644 --- a/internal/hyperfleet_api/client_test.go +++ b/internal/hyperfleet_api/client_test.go @@ -8,23 +8,20 @@ import ( "net/http" "net/http/httptest" "os" - "strings" "sync/atomic" "testing" "time" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewClient(t *testing.T) { // NewClient requires base URL - test with explicit base URL client, err := NewClient(WithBaseURL("http://localhost:8080")) - if err != nil { - t.Fatalf("NewClient returned error: %v", err) - } - if client == nil { - t.Fatal("NewClient returned nil") - } + require.NoError(t, err) + require.NotNil(t, client) } func TestNewClientMissingBaseURL(t *testing.T) { @@ -46,12 +43,8 @@ func TestNewClientMissingBaseURL(t *testing.T) { } _, err := 
NewClient() - if err == nil { - t.Fatal("expected error when base URL not configured") - } - if !strings.Contains(err.Error(), "base URL") { - t.Errorf("error should mention base URL, got: %v", err) - } + require.Error(t, err) + assert.Contains(t, err.Error(), "base URL") } func TestNewClientWithOptions(t *testing.T) { @@ -149,16 +142,12 @@ func TestClientGet(t *testing.T) { // Use server URL as base URL for testing client, err := NewClient(WithBaseURL(server.URL)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() // Use relative path - base URL will be prepended resp, err := client.Get(ctx, "/test") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + require.NoError(t, err, "unexpected error") if resp.StatusCode != http.StatusOK { t.Errorf("expected status 200, got %d", resp.StatusCode) @@ -186,16 +175,12 @@ func TestClientPost(t *testing.T) { defer server.Close() client, err := NewClient(WithBaseURL(server.URL)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() body := []byte(`{"key":"value"}`) resp, err := client.Post(ctx, "/test", body, WithHeader("Content-Type", "application/json")) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + require.NoError(t, err, "unexpected error") if resp.StatusCode != http.StatusCreated { t.Errorf("expected status 201, got %d", resp.StatusCode) @@ -222,18 +207,14 @@ func TestClientWithHeaders(t *testing.T) { defer server.Close() client, err := NewClient(WithBaseURL(server.URL), WithDefaultHeader("Authorization", "Bearer default-token")) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() // Test with additional header _, err = client.Get(ctx, "/test", WithHeader("X-Custom-Header", "custom-value"), ) - if err != 
nil { - t.Fatalf("unexpected error: %v", err) - } + require.NoError(t, err, "unexpected error") if receivedAuth != "Bearer default-token" { t.Errorf("expected Authorization header 'Bearer default-token', got %q", receivedAuth) @@ -265,15 +246,11 @@ func TestClientRetry(t *testing.T) { config.BaseDelay = 10 * time.Millisecond // Short delay for tests client, err := NewClient(WithConfig(config)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() resp, err := client.Get(ctx, "/test") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + require.NoError(t, err, "unexpected error") if resp.StatusCode != http.StatusOK { t.Errorf("expected status 200, got %d", resp.StatusCode) @@ -304,19 +281,13 @@ func TestClientRetryExhausted(t *testing.T) { config.BaseDelay = 10 * time.Millisecond client, err := NewClient(WithConfig(config)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() resp, err := client.Get(ctx, "/test") - if err == nil { - t.Fatal("expected error, got nil") - } + require.Error(t, err, "expected error, got nil") - if resp == nil { - t.Fatal("expected response even on error") - } + require.NotNil(t, resp, "expected response even on error") if resp.StatusCode != http.StatusServiceUnavailable { t.Errorf("expected status 503, got %d", resp.StatusCode) @@ -341,15 +312,11 @@ func TestClientNoRetryOn4xx(t *testing.T) { config.RetryAttempts = 3 client, err := NewClient(WithConfig(config)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() resp, err := client.Get(ctx, "/test") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + require.NoError(t, err, "unexpected error") // Should not retry on 400 if atomic.LoadInt32(&attemptCount) != 1 { @@ -375,15 +342,11 
@@ func TestClientTimeout(t *testing.T) { config.RetryAttempts = 1 client, err := NewClient(WithConfig(config)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() _, err = client.Get(ctx, "/test") - if err == nil { - t.Fatal("expected timeout error, got nil") - } + require.Error(t, err, "expected timeout error, got nil") } func TestClientContextCancellation(t *testing.T) { @@ -394,16 +357,12 @@ func TestClientContextCancellation(t *testing.T) { defer server.Close() client, err := NewClient(WithBaseURL(server.URL)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) defer cancel() _, err = client.Get(ctx, "/test") - if err == nil { - t.Fatal("expected context cancellation error, got nil") - } + require.Error(t, err, "expected context cancellation error, got nil") } func TestResponseHelpers(t *testing.T) { @@ -501,15 +460,11 @@ func TestClientPut(t *testing.T) { defer server.Close() client, err := NewClient(WithBaseURL(server.URL)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() resp, err := client.Put(ctx, "/test", []byte(`{"id":"123"}`)) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + require.NoError(t, err, "unexpected error") if resp.StatusCode != http.StatusOK { t.Errorf("expected status 200, got %d", resp.StatusCode) @@ -526,15 +481,11 @@ func TestClientPatch(t *testing.T) { defer server.Close() client, err := NewClient(WithBaseURL(server.URL)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() resp, err := client.Patch(ctx, "/test", []byte(`{"field":"value"}`)) - if err != nil { - t.Fatalf("unexpected 
error: %v", err) - } + require.NoError(t, err, "unexpected error") if resp.StatusCode != http.StatusOK { t.Errorf("expected status 200, got %d", resp.StatusCode) @@ -551,15 +502,11 @@ func TestClientDelete(t *testing.T) { defer server.Close() client, err := NewClient(WithBaseURL(server.URL)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() resp, err := client.Delete(ctx, "/test") - if err != nil { - t.Fatalf("unexpected error: %v", err) - } + require.NoError(t, err, "unexpected error") if resp.StatusCode != http.StatusNoContent { t.Errorf("expected status 204, got %d", resp.StatusCode) @@ -580,15 +527,9 @@ func TestAPIError(t *testing.T) { // Test Error() method errStr := err.Error() - if !strings.Contains(errStr, "POST") { - t.Errorf("error string should contain method, got: %s", errStr) - } - if !strings.Contains(errStr, "503") { - t.Errorf("error string should contain status code, got: %s", errStr) - } - if !strings.Contains(errStr, "3 attempt") { - t.Errorf("error string should contain attempts, got: %s", errStr) - } + assert.Contains(t, errStr, "POST", "error string should contain method, got: %s",errStr) + assert.Contains(t, errStr, "503", "error string should contain status code, got: %s",errStr) + assert.Contains(t, errStr, "3 attempt", "error string should contain attempts, got: %s",errStr) // Test helper methods if !err.IsServerError() { @@ -603,9 +544,7 @@ func TestAPIError(t *testing.T) { // Test ResponseBodyString bodyStr := err.ResponseBodyString() - if !strings.Contains(bodyStr, "backend is down") { - t.Errorf("expected response body string to contain error message, got: %s", bodyStr) - } + assert.Contains(t, bodyStr, "backend is down", "expected response body string to contain error message, got: %s",bodyStr) } func TestAPIErrorStatusHelpers(t *testing.T) { @@ -693,15 +632,11 @@ func TestAPIErrorInRetryExhausted(t *testing.T) { config.BaseDelay = 10 
* time.Millisecond client, err := NewClient(WithConfig(config)) - if err != nil { - t.Fatalf("failed to create client: %v", err) - } + require.NoError(t, err, "failed to create client") ctx := context.Background() _, err = client.Get(ctx, "/test") - if err == nil { - t.Fatal("expected error, got nil") - } + require.Error(t, err, "expected error, got nil") // Check if it's an APIError with proper details apiErr, ok := errors.IsAPIError(err) @@ -718,9 +653,7 @@ func TestAPIErrorInRetryExhausted(t *testing.T) { if apiErr.Attempts != 2 { t.Errorf("expected 2 attempts, got %d", apiErr.Attempts) } - if !strings.Contains(apiErr.ResponseBodyString(), "backend overloaded") { - t.Errorf("expected response body to contain error message, got: %s", apiErr.ResponseBodyString()) - } + assert.Contains(t, apiErr.ResponseBodyString(), "backend overloaded", "expected response body to contain error message, got: %s",apiErr.ResponseBodyString()) if !apiErr.IsServerError() { t.Error("expected IsServerError to return true") } diff --git a/internal/hyperfleet_api/mock.go b/internal/hyperfleet_api/mock.go new file mode 100644 index 0000000..c43991f --- /dev/null +++ b/internal/hyperfleet_api/mock.go @@ -0,0 +1,139 @@ +package hyperfleet_api + +import ( + "context" +) + +// MockClient implements Client for testing. +// It allows configuring mock responses for each method. 
+type MockClient struct { + // BaseURLValue is the value returned by BaseURL() + BaseURLValue string + + // DoResponse and DoError are returned by Do() + DoResponse *Response + DoError error + + // GetResponse and GetError are returned by Get() + GetResponse *Response + GetError error + + // PostResponse and PostError are returned by Post() + PostResponse *Response + PostError error + + // PutResponse and PutError are returned by Put() + PutResponse *Response + PutError error + + // PatchResponse and PatchError are returned by Patch() + PatchResponse *Response + PatchError error + + // DeleteResponse and DeleteError are returned by Delete() + DeleteResponse *Response + DeleteError error + + // Requests records all requests made to this mock for verification + Requests []*Request +} + +// NewMockClient creates a new mock API client for testing. +// By default, all methods return a 200 OK response. +func NewMockClient() *MockClient { + defaultResponse := &Response{ + StatusCode: 200, + Status: "200 OK", + } + return &MockClient{ + BaseURLValue: "http://mock-api.example.com", + DoResponse: defaultResponse, + GetResponse: defaultResponse, + PostResponse: defaultResponse, + PutResponse: defaultResponse, + PatchResponse: defaultResponse, + DeleteResponse: defaultResponse, + Requests: make([]*Request, 0), + } +} + +// Do implements Client.Do +func (m *MockClient) Do(ctx context.Context, req *Request) (*Response, error) { + m.Requests = append(m.Requests, req) + if m.DoError != nil { + return nil, m.DoError + } + return m.DoResponse, nil +} + +// Get implements Client.Get +func (m *MockClient) Get(ctx context.Context, url string, opts ...RequestOption) (*Response, error) { + req := &Request{Method: "GET", URL: url} + m.Requests = append(m.Requests, req) + if m.GetError != nil { + return nil, m.GetError + } + return m.GetResponse, nil +} + +// Post implements Client.Post +func (m *MockClient) Post(ctx context.Context, url string, body []byte, opts ...RequestOption) 
(*Response, error) { + req := &Request{Method: "POST", URL: url, Body: body} + m.Requests = append(m.Requests, req) + if m.PostError != nil { + return nil, m.PostError + } + return m.PostResponse, nil +} + +// Put implements Client.Put +func (m *MockClient) Put(ctx context.Context, url string, body []byte, opts ...RequestOption) (*Response, error) { + req := &Request{Method: "PUT", URL: url, Body: body} + m.Requests = append(m.Requests, req) + if m.PutError != nil { + return nil, m.PutError + } + return m.PutResponse, nil +} + +// Patch implements Client.Patch +func (m *MockClient) Patch(ctx context.Context, url string, body []byte, opts ...RequestOption) (*Response, error) { + req := &Request{Method: "PATCH", URL: url, Body: body} + m.Requests = append(m.Requests, req) + if m.PatchError != nil { + return nil, m.PatchError + } + return m.PatchResponse, nil +} + +// Delete implements Client.Delete +func (m *MockClient) Delete(ctx context.Context, url string, opts ...RequestOption) (*Response, error) { + req := &Request{Method: "DELETE", URL: url} + m.Requests = append(m.Requests, req) + if m.DeleteError != nil { + return nil, m.DeleteError + } + return m.DeleteResponse, nil +} + +// BaseURL implements Client.BaseURL +func (m *MockClient) BaseURL() string { + return m.BaseURLValue +} + +// Reset clears all recorded requests +func (m *MockClient) Reset() { + m.Requests = make([]*Request, 0) +} + +// GetLastRequest returns the most recent request, or nil if none +func (m *MockClient) GetLastRequest() *Request { + if len(m.Requests) == 0 { + return nil + } + return m.Requests[len(m.Requests)-1] +} + +// Ensure MockClient implements Client +var _ Client = (*MockClient)(nil) + diff --git a/internal/k8s_client/README.md b/internal/k8s_client/README.md index 57b0c64..911e633 100644 --- a/internal/k8s_client/README.md +++ b/internal/k8s_client/README.md @@ -14,7 +14,7 @@ The k8s_client package provides foundational Kubernetes API operations using **c - In-cluster and 
kubeconfig-based authentication - Industry-standard controller-runtime client (used by Kubebuilder, Operator SDK) -**For high-level operations:** Use `internal/config-loader` package which provides `ResourceManager` with template rendering, resource discovery, and lifecycle management. +**For high-level operations:** Use `internal/executor` package which provides resource management with template rendering, resource discovery, and lifecycle management. ### Why Controller-Runtime? @@ -316,10 +316,11 @@ See `test/integration/k8s_client/` for integration test examples and setup guide 2. **Use in-cluster auth for production** (empty `KubeConfigPath`) 3. **Set appropriate rate limits** to avoid overwhelming the API server 4. **Handle errors gracefully** - check for `IsNotFound`, `IsAlreadyExists`, etc. -5. **Use high-level ResourceManager** from `config-loader` for template rendering and discovery +5. **Use high-level executor** from `internal/executor` for template rendering and discovery ## Related Packages -- **`internal/config-loader`**: High-level resource management with templates and discovery +- **`internal/executor`**: High-level resource management with templates and discovery +- **`internal/config_loader`**: Parses adapter configurations - **`pkg/errors`**: Error handling utilities - **`pkg/logger`**: Logging interface diff --git a/internal/k8s_client/client.go b/internal/k8s_client/client.go index 603f313..1f6d6e2 100644 --- a/internal/k8s_client/client.go +++ b/internal/k8s_client/client.go @@ -4,7 +4,7 @@ import ( "context" "encoding/json" - "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" + apperrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -63,14 +63,14 @@ func NewClient(ctx context.Context, config ClientConfig, log logger.Logger) (*Cl // Use in-cluster config 
with ServiceAccount restConfig, err = rest.InClusterConfig() if err != nil { - return nil, errors.KubernetesError("failed to create in-cluster config: %v", err) + return nil, apperrors.KubernetesError("failed to create in-cluster config: %v", err) } log.Info("Using in-cluster Kubernetes configuration (ServiceAccount)") } else { // Use kubeconfig file for local development or remote access restConfig, err = clientcmd.BuildConfigFromFlags("", config.KubeConfigPath) if err != nil { - return nil, errors.KubernetesError("failed to load kubeconfig from %s: %v", config.KubeConfigPath, err) + return nil, apperrors.KubernetesError("failed to load kubeconfig from %s: %v", config.KubeConfigPath, err) } log.Infof("Using kubeconfig from: %s", config.KubeConfigPath) } @@ -91,7 +91,7 @@ func NewClient(ctx context.Context, config ClientConfig, log logger.Logger) (*Cl // This provides automatic caching, better performance, and cleaner API k8sClient, err := client.New(restConfig, client.Options{}) if err != nil { - return nil, errors.KubernetesError("failed to create kubernetes client: %v", err) + return nil, apperrors.KubernetesError("failed to create kubernetes client: %v", err) } return &Client{ @@ -105,7 +105,7 @@ func NewClient(ctx context.Context, config ClientConfig, log logger.Logger) (*Cl func NewClientFromConfig(ctx context.Context, restConfig *rest.Config, log logger.Logger) (*Client, error) { k8sClient, err := client.New(restConfig, client.Options{}) if err != nil { - return nil, errors.KubernetesError("failed to create kubernetes client: %v", err) + return nil, apperrors.KubernetesError("failed to create kubernetes client: %v", err) } return &Client{ @@ -125,9 +125,16 @@ func (c *Client) CreateResource(ctx context.Context, obj *unstructured.Unstructu err := c.client.Create(ctx, obj) if err != nil { if apierrors.IsAlreadyExists(err) { - return nil, err - } - return nil, errors.KubernetesError("failed to create resource %s/%s (namespace: %s): %v", gvk.Kind, name, 
namespace, err) + return nil, err + } + return nil, &apperrors.K8sOperationError{ + Operation: "create", + Resource: name, + Kind: gvk.Kind, + Namespace: namespace, + Message: err.Error(), + Err: err, + } } c.log.Infof("Successfully created resource: %s/%s", gvk.Kind, name) @@ -152,7 +159,14 @@ func (c *Client) GetResource(ctx context.Context, gvk schema.GroupVersionKind, n if apierrors.IsNotFound(err) { return nil, err } - return nil, errors.KubernetesError("failed to get resource %s/%s (namespace: %s): %v", gvk.Kind, name, namespace, err) + return nil, &apperrors.K8sOperationError{ + Operation: "get", + Resource: name, + Kind: gvk.Kind, + Namespace: namespace, + Message: err.Error(), + Err: err, + } } c.log.Infof("Successfully retrieved resource: %s/%s", gvk.Kind, name) @@ -180,18 +194,25 @@ func (c *Client) ListResources(ctx context.Context, gvk schema.GroupVersionKind, if labelSelector != "" { selector, err := metav1.ParseToLabelSelector(labelSelector) if err != nil { - return nil, errors.KubernetesError("invalid label selector %s: %v", labelSelector, err) + return nil, apperrors.KubernetesError("invalid label selector %s: %v", labelSelector, err) } parsedLabelSelector, err := metav1.LabelSelectorAsSelector(selector) if err != nil { - return nil, errors.KubernetesError("failed to convert label selector: %v", err) + return nil, apperrors.KubernetesError("failed to convert label selector: %v", err) } opts = append(opts, client.MatchingLabelsSelector{Selector: parsedLabelSelector}) } err := c.client.List(ctx, list, opts...) 
if err != nil { - return nil, errors.KubernetesError("failed to list resources %s (namespace: %s, labelSelector: %s): %v", gvk.Kind, namespace, labelSelector, err) + return nil, &apperrors.K8sOperationError{ + Operation: "list", + Resource: gvk.Kind, + Kind: gvk.Kind, + Namespace: namespace, + Message: err.Error(), + Err: err, + } } c.log.Infof("Successfully listed resources: %s (found %d items)", gvk.Kind, len(list.Items)) @@ -230,7 +251,14 @@ func (c *Client) UpdateResource(ctx context.Context, obj *unstructured.Unstructu if apierrors.IsConflict(err) { return nil, err } - return nil, errors.KubernetesError("failed to update resource %s/%s (namespace: %s): %v", gvk.Kind, name, namespace, err) + return nil, &apperrors.K8sOperationError{ + Operation: "update", + Resource: name, + Kind: gvk.Kind, + Namespace: namespace, + Message: err.Error(), + Err: err, + } } c.log.Infof("Successfully updated resource: %s/%s", gvk.Kind, name) @@ -252,7 +280,14 @@ func (c *Client) DeleteResource(ctx context.Context, gvk schema.GroupVersionKind c.log.Infof("Resource already deleted: %s/%s", gvk.Kind, name) return nil } - return errors.KubernetesError("failed to delete resource %s/%s (namespace: %s): %v", gvk.Kind, name, namespace, err) + return &apperrors.K8sOperationError{ + Operation: "delete", + Resource: name, + Kind: gvk.Kind, + Namespace: namespace, + Message: err.Error(), + Err: err, + } } c.log.Infof("Successfully deleted resource: %s/%s", gvk.Kind, name) @@ -289,7 +324,7 @@ func (c *Client) PatchResource(ctx context.Context, gvk schema.GroupVersionKind, // Parse patch data to validate JSON var patchObj map[string]interface{} if err := json.Unmarshal(patchData, &patchObj); err != nil { - return nil, errors.KubernetesError("invalid patch data: %v", err) + return nil, apperrors.KubernetesError("invalid patch data: %v", err) } // Create the resource reference @@ -308,7 +343,14 @@ func (c *Client) PatchResource(ctx context.Context, gvk schema.GroupVersionKind, if 
apierrors.IsNotFound(err) { return nil, err } - return nil, errors.KubernetesError("failed to patch resource %s/%s (namespace: %s): %v", gvk.Kind, name, namespace, err) + return nil, &apperrors.K8sOperationError{ + Operation: "patch", + Resource: name, + Kind: gvk.Kind, + Namespace: namespace, + Message: err.Error(), + Err: err, + } } c.log.Infof("Successfully patched resource: %s/%s", gvk.Kind, name) diff --git a/internal/k8s_client/data_extractor.go b/internal/k8s_client/data_extractor.go new file mode 100644 index 0000000..4821140 --- /dev/null +++ b/internal/k8s_client/data_extractor.go @@ -0,0 +1,114 @@ +package k8s_client + +import ( + "context" + "encoding/base64" + "fmt" + "strings" + + apperrors "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ResourcePath represents a parsed Kubernetes resource path +type ResourcePath struct { + Namespace string + ResourceName string + Key string +} + +// ParseResourcePath parses a path in the format: namespace.name.key +func ParseResourcePath(path, resourceType string) (*ResourcePath, error) { + parts := strings.Split(path, ".") + if len(parts) < 3 { + return nil, apperrors.NewK8sInvalidPathError(resourceType, path, "namespace.name.key") + } + + return &ResourcePath{ + Namespace: parts[0], + ResourceName: parts[1], + Key: strings.Join(parts[2:], "."), // Allow dots in key name + }, nil +} + +// GetResourceData retrieves data from a Kubernetes resource (Secret or ConfigMap) +func (c *Client) GetResourceData(ctx context.Context, gvk schema.GroupVersionKind, namespace, name, resourceType string) (map[string]interface{}, error) { + resource, err := c.GetResource(ctx, gvk, namespace, name) + if err != nil { + return nil, apperrors.NewK8sResourceDataError(resourceType, namespace, name, "failed to get resource", err) + } + + data, found, err := unstructured.NestedMap(resource.Object, "data") + if err != nil { 
+ return nil, apperrors.NewK8sResourceDataError(resourceType, namespace, name, "failed to access data field", err) + } + if !found { + return nil, apperrors.NewK8sResourceDataError(resourceType, namespace, name, "no data field found", nil) + } + + return data, nil +} + +// ExtractFromSecret extracts a value from a Kubernetes Secret +// Format: namespace.name.key (namespace is required) +func (c *Client) ExtractFromSecret(ctx context.Context, path string) (string, error) { + resourcePath, err := ParseResourcePath(path, "secret") + if err != nil { + return "", err + } + + secretGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} + data, err := c.GetResourceData(ctx, secretGVK, resourcePath.Namespace, resourcePath.ResourceName, "Secret") + if err != nil { + return "", err + } + + encodedValue, ok := data[resourcePath.Key] + if !ok { + return "", apperrors.NewK8sResourceKeyNotFoundError("Secret", resourcePath.Namespace, resourcePath.ResourceName, resourcePath.Key) + } + + encodedStr, ok := encodedValue.(string) + if !ok { + return "", apperrors.NewK8sResourceDataError("Secret", resourcePath.Namespace, resourcePath.ResourceName, + fmt.Sprintf("data for key '%s' is not a string", resourcePath.Key), nil) + } + + decodedBytes, err := base64.StdEncoding.DecodeString(encodedStr) + if err != nil { + return "", apperrors.NewK8sResourceDataError("Secret", resourcePath.Namespace, resourcePath.ResourceName, + fmt.Sprintf("failed to decode data for key '%s'", resourcePath.Key), err) + } + + return string(decodedBytes), nil +} + +// ExtractFromConfigMap extracts a value from a Kubernetes ConfigMap +// Format: namespace.name.key (namespace is required) +func (c *Client) ExtractFromConfigMap(ctx context.Context, path string) (string, error) { + resourcePath, err := ParseResourcePath(path, "configmap") + if err != nil { + return "", err + } + + configMapGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} + data, err := 
c.GetResourceData(ctx, configMapGVK, resourcePath.Namespace, resourcePath.ResourceName, "ConfigMap") + if err != nil { + return "", err + } + + value, ok := data[resourcePath.Key] + if !ok { + return "", apperrors.NewK8sResourceKeyNotFoundError("ConfigMap", resourcePath.Namespace, resourcePath.ResourceName, resourcePath.Key) + } + + valueStr, ok := value.(string) + if !ok { + return "", apperrors.NewK8sResourceDataError("ConfigMap", resourcePath.Namespace, resourcePath.ResourceName, + fmt.Sprintf("data for key '%s' is not a string", resourcePath.Key), nil) + } + + return valueStr, nil +} + diff --git a/internal/k8s_client/interface.go b/internal/k8s_client/interface.go new file mode 100644 index 0000000..4b496fc --- /dev/null +++ b/internal/k8s_client/interface.go @@ -0,0 +1,51 @@ +package k8s_client + +import ( + "context" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// K8sClient defines the interface for Kubernetes operations. +// This interface allows for easy mocking in unit tests without requiring +// a real Kubernetes cluster or DryRun mode. +type K8sClient interface { + // Resource CRUD operations + + // GetResource retrieves a single Kubernetes resource by GVK, namespace, and name. + // Returns the resource or an error if not found. + GetResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error) + + // CreateResource creates a new Kubernetes resource. + // Returns the created resource with server-generated fields populated. + CreateResource(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) + + // UpdateResource updates an existing Kubernetes resource. + // The resource must have resourceVersion set for optimistic concurrency. 
+ UpdateResource(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) + + // DeleteResource deletes a Kubernetes resource by GVK, namespace, and name. + DeleteResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) error + + // Discovery operations + + // DiscoverResources discovers Kubernetes resources based on the Discovery configuration. + // If Discovery.IsSingleResource() is true, it fetches a single resource by name. + // Otherwise, it lists resources matching the label selector. + DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery Discovery) (*unstructured.UnstructuredList, error) + + // Data extraction operations + + // ExtractFromSecret extracts a value from a Kubernetes Secret. + // Format: namespace.name.key (namespace is required) + ExtractFromSecret(ctx context.Context, path string) (string, error) + + // ExtractFromConfigMap extracts a value from a Kubernetes ConfigMap. + // Format: namespace.name.key (namespace is required) + ExtractFromConfigMap(ctx context.Context, path string) (string, error) +} + +// Ensure Client implements K8sClient interface +var _ K8sClient = (*Client)(nil) + diff --git a/internal/k8s_client/mock.go b/internal/k8s_client/mock.go new file mode 100644 index 0000000..3e711f0 --- /dev/null +++ b/internal/k8s_client/mock.go @@ -0,0 +1,129 @@ +package k8s_client + +import ( + "context" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// MockK8sClient implements K8sClient for testing. +// It stores resources in memory and allows configuring mock responses. 
+type MockK8sClient struct { + // Resources stores created/updated resources by "namespace/name" key + Resources map[string]*unstructured.Unstructured + + // Mock responses - set these to control behavior + GetResourceResult *unstructured.Unstructured + GetResourceError error + CreateResourceResult *unstructured.Unstructured + CreateResourceError error + UpdateResourceResult *unstructured.Unstructured + UpdateResourceError error + DeleteResourceError error + DiscoverResult *unstructured.UnstructuredList + DiscoverError error + ExtractSecretResult string + ExtractSecretError error + ExtractConfigResult string + ExtractConfigError error +} + +// NewMockK8sClient creates a new mock K8s client for testing +func NewMockK8sClient() *MockK8sClient { + return &MockK8sClient{ + Resources: make(map[string]*unstructured.Unstructured), + } +} + +// GetResource implements K8sClient.GetResource +// Returns a NotFound error when the resource doesn't exist, matching real K8s client behavior. +func (m *MockK8sClient) GetResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) (*unstructured.Unstructured, error) { + // Check explicit error override first + if m.GetResourceError != nil { + return nil, m.GetResourceError + } + // Check explicit result override + if m.GetResourceResult != nil { + return m.GetResourceResult, nil + } + // Check stored resources + key := namespace + "/" + name + if res, ok := m.Resources[key]; ok { + return res, nil + } + // Resource not found - return proper K8s NotFound error (matches real client behavior) + gr := schema.GroupResource{Group: gvk.Group, Resource: gvk.Kind} + return nil, apierrors.NewNotFound(gr, name) +} + +// CreateResource implements K8sClient.CreateResource +func (m *MockK8sClient) CreateResource(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + if m.CreateResourceError != nil { + return nil, m.CreateResourceError + } + if m.CreateResourceResult != nil { + return 
m.CreateResourceResult, nil + } + // Store the resource + key := obj.GetNamespace() + "/" + obj.GetName() + m.Resources[key] = obj.DeepCopy() + return obj, nil +} + +// UpdateResource implements K8sClient.UpdateResource +func (m *MockK8sClient) UpdateResource(ctx context.Context, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { + if m.UpdateResourceError != nil { + return nil, m.UpdateResourceError + } + if m.UpdateResourceResult != nil { + return m.UpdateResourceResult, nil + } + // Store the resource + key := obj.GetNamespace() + "/" + obj.GetName() + m.Resources[key] = obj.DeepCopy() + return obj, nil +} + +// DeleteResource implements K8sClient.DeleteResource +func (m *MockK8sClient) DeleteResource(ctx context.Context, gvk schema.GroupVersionKind, namespace, name string) error { + if m.DeleteResourceError != nil { + return m.DeleteResourceError + } + // Remove from stored resources + key := namespace + "/" + name + delete(m.Resources, key) + return nil +} + +// DiscoverResources implements K8sClient.DiscoverResources +func (m *MockK8sClient) DiscoverResources(ctx context.Context, gvk schema.GroupVersionKind, discovery Discovery) (*unstructured.UnstructuredList, error) { + if m.DiscoverError != nil { + return nil, m.DiscoverError + } + if m.DiscoverResult != nil { + return m.DiscoverResult, nil + } + return &unstructured.UnstructuredList{}, nil +} + +// ExtractFromSecret implements K8sClient.ExtractFromSecret +func (m *MockK8sClient) ExtractFromSecret(ctx context.Context, path string) (string, error) { + if m.ExtractSecretError != nil { + return "", m.ExtractSecretError + } + return m.ExtractSecretResult, nil +} + +// ExtractFromConfigMap implements K8sClient.ExtractFromConfigMap +func (m *MockK8sClient) ExtractFromConfigMap(ctx context.Context, path string) (string, error) { + if m.ExtractConfigError != nil { + return "", m.ExtractConfigError + } + return m.ExtractConfigResult, nil +} + +// Ensure MockK8sClient implements K8sClient +var _ 
// -----------------------------------------------------------------------------
// CEL Error Types
// -----------------------------------------------------------------------------

// CELErrorType identifies the CEL processing phase in which an error occurred.
type CELErrorType string

const (
	CELErrorTypeParse   CELErrorType = "parse"
	CELErrorTypeProgram CELErrorType = "program"
	CELErrorTypeEval    CELErrorType = "evaluation"
)

// CELError represents an error during CEL expression processing.
type CELError struct {
	// Type is the error type (parse, program, evaluation).
	Type CELErrorType
	// Expression is the CEL expression that caused the error.
	Expression string
	// Reason provides a human-readable error description.
	Reason string
	// Err is the underlying error, if any.
	Err error
}

// Error implements the error interface. The underlying error, when present,
// is appended after the reason.
func (e *CELError) Error() string {
	if e.Err != nil {
		return fmt.Sprintf("CEL %s error for expression %q: %s: %v", e.Type, e.Expression, e.Reason, e.Err)
	}
	return fmt.Sprintf("CEL %s error for expression %q: %s", e.Type, e.Expression, e.Reason)
}

// Unwrap returns the underlying error for errors.Is/As support.
func (e *CELError) Unwrap() error {
	return e.Err
}

// NewCELParseError creates a new CEL parse error.
func NewCELParseError(expression string, err error) *CELError {
	return &CELError{
		Type:       CELErrorTypeParse,
		Expression: expression,
		Reason:     "failed to parse expression",
		Err:        err,
	}
}

// NewCELProgramError creates a new CEL program creation error.
func NewCELProgramError(expression string, err error) *CELError {
	return &CELError{
		Type:       CELErrorTypeProgram,
		Expression: expression,
		Reason:     "failed to create program",
		Err:        err,
	}
}

// NewCELEvalError creates a new CEL evaluation error.
//
// It returns a typed nil (*CELError) when err is nil. Callers that assign the
// result directly to a plain error variable should check err before calling,
// because an error interface holding a nil *CELError is itself non-nil.
//
// FIX: Reason is now the fixed string "failed to evaluate expression" rather
// than err.Error(); previously Error() printed the underlying message twice
// (once as Reason and again as the wrapped Err via "%s: %v").
func NewCELEvalError(expression string, err error) *CELError {
	if err == nil {
		return nil
	}
	return &CELError{
		Type:       CELErrorTypeEval,
		Expression: expression,
		Reason:     "failed to evaluate expression",
		Err:        err,
	}
}

// IsCELError checks whether err is (or wraps) a *CELError and returns it.
func IsCELError(err error) (*CELError, bool) {
	var celErr *CELError
	if errors.As(err, &celErr) {
		return celErr, true
	}
	return nil, false
}

// IsParse reports whether the error occurred while parsing the expression.
func (e *CELError) IsParse() bool {
	return e.Type == CELErrorTypeParse
}

// IsProgram reports whether the error occurred while building the CEL program.
func (e *CELError) IsProgram() bool {
	return e.Type == CELErrorTypeProgram
}

// IsEval reports whether the error occurred while evaluating the expression.
func (e *CELError) IsEval() bool {
	return e.Type == CELErrorTypeEval
}

// -----------------------------------------------------------------------------
// CEL Environment Error
// -----------------------------------------------------------------------------

// CELEnvError represents an error when creating a CEL environment.
type CELEnvError struct {
	Reason string
	Err    error
}

// Error implements the error interface.
func (e *CELEnvError) Error() string {
	if e.Err != nil {
		return fmt.Sprintf("failed to create CEL environment: %s: %v", e.Reason, e.Err)
	}
	return fmt.Sprintf("failed to create CEL environment: %s", e.Reason)
}

// Unwrap returns the underlying error.
func (e *CELEnvError) Unwrap() error {
	return e.Err
}

// NewCELEnvError creates a new CEL environment error.
func NewCELEnvError(reason string, err error) *CELEnvError {
	return &CELEnvError{
		Reason: reason,
		Err:    err,
	}
}

// -----------------------------------------------------------------------------
// CEL Conversion Errors
// -----------------------------------------------------------------------------

// CELConversionError represents an error during condition-to-CEL conversion.
// The message produced by Error() is selected by the first populated field in
// the order: Index (>= 0), Operator, ValueType, then the generic Reason.
type CELConversionError struct {
	// Field is the field being converted (for condition errors).
	Field string
	// Operator is the operator being used.
	Operator string
	// ValueType is the type of value that couldn't be converted.
	ValueType string
	// Reason provides context about what failed.
	Reason string
	// Index is the condition index; -1 when not tied to a specific condition.
	Index int
	// Err is the underlying error.
	Err error
}

// Error implements the error interface.
func (e *CELConversionError) Error() string {
	if e.Index >= 0 {
		return fmt.Sprintf("failed to convert condition %d to CEL: %s", e.Index, e.Reason)
	}
	if e.Operator != "" {
		return fmt.Sprintf("unsupported operator for CEL conversion: %s", e.Operator)
	}
	if e.ValueType != "" {
		return fmt.Sprintf("unsupported type for CEL formatting: %s", e.ValueType)
	}
	return fmt.Sprintf("CEL conversion error: %s", e.Reason)
}

// Unwrap returns the underlying error.
func (e *CELConversionError) Unwrap() error {
	return e.Err
}

// NewCELUnsupportedOperatorError creates an error for unsupported operators.
func NewCELUnsupportedOperatorError(operator string) *CELConversionError {
	return &CELConversionError{
		Operator: operator,
		Reason:   fmt.Sprintf("operator %q is not supported for CEL conversion", operator),
		Index:    -1,
	}
}

// NewCELUnsupportedTypeError creates an error for unsupported value types.
func NewCELUnsupportedTypeError(valueType string) *CELConversionError {
	return &CELConversionError{
		ValueType: valueType,
		Reason:    fmt.Sprintf("type %s cannot be formatted as CEL literal", valueType),
		Index:     -1,
	}
}

// NewCELConditionConversionError creates an error for condition conversion failures.
func NewCELConditionConversionError(index int, err error) *CELConversionError {
	return &CELConversionError{
		Index:  index,
		Reason: "condition conversion failed",
		Err:    err,
	}
}
----------------------------------------------------------------------------- +// CEL Type Mismatch Error +// ----------------------------------------------------------------------------- + +// CELTypeMismatchError represents a type mismatch when evaluating a CEL expression +type CELTypeMismatchError struct { + Expression string + ExpectedType string + ActualType string +} + +// Error implements the error interface +func (e *CELTypeMismatchError) Error() string { + return fmt.Sprintf("CEL expression %q returned %s, expected %s", + e.Expression, e.ActualType, e.ExpectedType) +} + +// NewCELTypeMismatchError creates a new CELTypeMismatchError +func NewCELTypeMismatchError(expression, expectedType, actualType string) *CELTypeMismatchError { + return &CELTypeMismatchError{ + Expression: expression, + ExpectedType: expectedType, + ActualType: actualType, + } +} + diff --git a/pkg/errors/k8s_error.go b/pkg/errors/k8s_error.go new file mode 100644 index 0000000..cb88c57 --- /dev/null +++ b/pkg/errors/k8s_error.go @@ -0,0 +1,193 @@ +package errors + +import ( + "errors" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" +) + +// ----------------------------------------------------------------------------- +// K8s Operation Error +// ----------------------------------------------------------------------------- + +// K8sOperationError represents a structured Kubernetes operation error with detailed context. +// This allows callers to handle K8s errors with full information about what failed. 
+type K8sOperationError struct { + // Operation is the operation that failed: "create", "update", "delete", "get", "patch", "list" + Operation string + // Resource is the resource name + Resource string + // Kind is the Kubernetes resource kind + Kind string + // Namespace is the resource namespace + Namespace string + // Message is the error message + Message string + // Err is the underlying error + Err error +} + +// Error implements the error interface +func (e *K8sOperationError) Error() string { + if e.Namespace != "" { + return fmt.Sprintf("K8s %s operation failed: %s/%s (namespace: %s): %s", + e.Operation, e.Kind, e.Resource, e.Namespace, e.Message) + } + return fmt.Sprintf("K8s %s operation failed: %s/%s: %s", + e.Operation, e.Kind, e.Resource, e.Message) +} + +// Unwrap returns the underlying error for errors.Is/As support +func (e *K8sOperationError) Unwrap() error { + return e.Err +} + +// IsK8sOperationError checks if an error is a K8sOperationError and returns it. +// This function supports wrapped errors. 
+func IsK8sOperationError(err error) (*K8sOperationError, bool) { + var k8sErr *K8sOperationError + if errors.As(err, &k8sErr) { + return k8sErr, true + } + return nil, false +} + +// ----------------------------------------------------------------------------- +// K8s Resource Data Extraction Errors +// ----------------------------------------------------------------------------- + +// K8sResourceKeyNotFoundError represents an error when a key is not found in a K8s resource +type K8sResourceKeyNotFoundError struct { + ResourceType string // "Secret" or "ConfigMap" + Namespace string + ResourceName string + Key string +} + +// Error implements the error interface +func (e *K8sResourceKeyNotFoundError) Error() string { + return fmt.Sprintf("key '%s' not found in %s %s/%s", e.Key, e.ResourceType, e.Namespace, e.ResourceName) +} + +// NewK8sResourceKeyNotFoundError creates a new K8sResourceKeyNotFoundError +func NewK8sResourceKeyNotFoundError(resourceType, namespace, resourceName, key string) *K8sResourceKeyNotFoundError { + return &K8sResourceKeyNotFoundError{ + ResourceType: resourceType, + Namespace: namespace, + ResourceName: resourceName, + Key: key, + } +} + +// K8sInvalidPathError represents an error when a resource path format is invalid +type K8sInvalidPathError struct { + ResourceType string + Path string + ExpectedFormat string +} + +// Error implements the error interface +func (e *K8sInvalidPathError) Error() string { + return fmt.Sprintf("invalid %s path format: %s (expected: %s)", e.ResourceType, e.Path, e.ExpectedFormat) +} + +// NewK8sInvalidPathError creates a new K8sInvalidPathError +func NewK8sInvalidPathError(resourceType, path, expectedFormat string) *K8sInvalidPathError { + return &K8sInvalidPathError{ + ResourceType: resourceType, + Path: path, + ExpectedFormat: expectedFormat, + } +} + +// K8sResourceDataError represents an error when accessing or parsing resource data +type K8sResourceDataError struct { + ResourceType string // "Secret" or 
"ConfigMap" + Namespace string + ResourceName string + Field string // e.g., "data", or specific key name + Reason string // What went wrong + Err error // Underlying error +} + +// Error implements the error interface +func (e *K8sResourceDataError) Error() string { + if e.Err != nil { + return fmt.Sprintf("%s %s/%s: %s: %v", e.ResourceType, e.Namespace, e.ResourceName, e.Reason, e.Err) + } + return fmt.Sprintf("%s %s/%s: %s", e.ResourceType, e.Namespace, e.ResourceName, e.Reason) +} + +// Unwrap returns the underlying error for errors.Is/As support +func (e *K8sResourceDataError) Unwrap() error { + return e.Err +} + +// NewK8sResourceDataError creates a new K8sResourceDataError +func NewK8sResourceDataError(resourceType, namespace, resourceName, reason string, err error) *K8sResourceDataError { + return &K8sResourceDataError{ + ResourceType: resourceType, + Namespace: namespace, + ResourceName: resourceName, + Reason: reason, + Err: err, + } +} + +// ----------------------------------------------------------------------------- +// K8s Retryable Error Detection +// ----------------------------------------------------------------------------- + +// IsRetryableDiscoveryError determines if a discovery error is transient/retryable +// (and thus safe to ignore and proceed with create) or fatal (and should fail fast). +// +// Retryable errors (returns true): +// - Timeouts (request/server timeouts) +// - Server errors (5xx status codes) +// - Network/connection errors (connection refused, reset, etc.) 
+// - Service unavailable +// - Too many requests (rate limiting) +// +// Non-retryable/fatal errors (returns false): +// - Forbidden (403) - permission denied +// - Unauthorized (401) - authentication failure +// - Bad request (400) - invalid request +// - Invalid/validation errors +// - Gone (410) - resource no longer exists +// - Method not supported +// - Not acceptable +func IsRetryableDiscoveryError(err error) bool { + if err == nil { + return false + } + + // Check for transient Kubernetes API errors (retryable) + if apierrors.IsTimeout(err) || + apierrors.IsServerTimeout(err) || + apierrors.IsServiceUnavailable(err) || + apierrors.IsInternalError(err) || + apierrors.IsTooManyRequests(err) { + return true + } + + // Check for fatal Kubernetes API errors (non-retryable) + if apierrors.IsForbidden(err) || + apierrors.IsUnauthorized(err) || + apierrors.IsBadRequest(err) || + apierrors.IsInvalid(err) || + apierrors.IsGone(err) || + apierrors.IsMethodNotSupported(err) || + apierrors.IsNotAcceptable(err) { + return false + } + + // Check for network-level errors (retryable) + if IsNetworkError(err) { + return true + } + + // Default: treat unknown errors as non-retryable to surface issues early + return false +} + diff --git a/pkg/errors/k8s_error_test.go b/pkg/errors/k8s_error_test.go new file mode 100644 index 0000000..88cfda4 --- /dev/null +++ b/pkg/errors/k8s_error_test.go @@ -0,0 +1,424 @@ +package errors + +import ( + "errors" + "fmt" + "net" + "os" + "syscall" + "testing" + + "github.com/stretchr/testify/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Helper function to create K8s API errors for testing +func newStatusError(status metav1.Status) *apierrors.StatusError { + return &apierrors.StatusError{ErrStatus: status} +} + +func TestIsRetryableDiscoveryError_Nil(t *testing.T) { + assert.False(t, IsRetryableDiscoveryError(nil)) +} + +func TestIsRetryableDiscoveryError_RetryableK8sErrors(t 
*testing.T) { + tests := []struct { + name string + err error + }{ + { + name: "Timeout error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonTimeout, + Code: 408, + }), + }, + { + name: "ServerTimeout error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonServerTimeout, + Code: 504, + }), + }, + { + name: "ServiceUnavailable error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonServiceUnavailable, + Code: 503, + }), + }, + { + name: "InternalError", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonInternalError, + Code: 500, + }), + }, + { + name: "TooManyRequests error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonTooManyRequests, + Code: 429, + }), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.True(t, IsRetryableDiscoveryError(tt.err), + "Expected %s to be retryable", tt.name) + }) + } +} + +func TestIsRetryableDiscoveryError_NonRetryableK8sErrors(t *testing.T) { + tests := []struct { + name string + err error + }{ + { + name: "Forbidden error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonForbidden, + Code: 403, + }), + }, + { + name: "Unauthorized error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonUnauthorized, + Code: 401, + }), + }, + { + name: "BadRequest error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonBadRequest, + Code: 400, + }), + }, + { + name: "Invalid error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonInvalid, + Code: 422, + }), + }, + { + name: "Gone error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonGone, + Code: 410, + }), + }, + { + name: "MethodNotAllowed error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonMethodNotAllowed, + Code: 405, + }), + }, + { + name: "NotAcceptable error", + err: newStatusError(metav1.Status{ + Reason: metav1.StatusReasonNotAcceptable, + Code: 406, 
+ }), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.False(t, IsRetryableDiscoveryError(tt.err), + "Expected %s to NOT be retryable (fatal error)", tt.name) + }) + } +} + +func TestIsRetryableDiscoveryError_NetworkErrors(t *testing.T) { + tests := []struct { + name string + err error + }{ + { + name: "Connection refused", + err: syscall.ECONNREFUSED, + }, + { + name: "Connection reset", + err: syscall.ECONNRESET, + }, + { + name: "Connection timed out", + err: syscall.ETIMEDOUT, + }, + { + name: "Network unreachable", + err: syscall.ENETUNREACH, + }, + { + name: "Broken pipe", + err: syscall.EPIPE, + }, + { + name: "Wrapped connection refused", + err: &net.OpError{ + Op: "dial", + Net: "tcp", + Err: &os.SyscallError{Syscall: "connect", Err: syscall.ECONNREFUSED}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.True(t, IsRetryableDiscoveryError(tt.err), + "Expected network error %s to be retryable", tt.name) + }) + } +} + +func TestIsRetryableDiscoveryError_UnknownErrors(t *testing.T) { + tests := []struct { + name string + err error + }{ + { + name: "Generic error", + err: errors.New("some unknown error"), + }, + { + name: "Wrapped generic error", + err: fmt.Errorf("wrapped: %w", errors.New("inner error")), + }, + { + name: "Custom error type", + err: &customError{msg: "custom error"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Unknown errors should NOT be retryable to surface issues early + assert.False(t, IsRetryableDiscoveryError(tt.err), + "Expected unknown error to NOT be retryable (fail fast)") + }) + } +} + +func TestIsRetryableDiscoveryError_NotFoundError(t *testing.T) { + // NotFound is a special case - it's not an error in discovery context + // It just means the resource doesn't exist yet + notFoundErr := newStatusError(metav1.Status{ + Reason: metav1.StatusReasonNotFound, + Code: 404, + }) + + // NotFound should NOT be retryable 
(it's not transient, resource just doesn't exist) + // The caller should handle NotFound separately + assert.False(t, IsRetryableDiscoveryError(notFoundErr), + "NotFound should not be retryable - it's a definitive answer") +} + +func TestIsRetryableDiscoveryError_WrappedK8sErrors(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "Wrapped timeout error", + err: fmt.Errorf("discovery failed: %w", newStatusError(metav1.Status{ + Reason: metav1.StatusReasonTimeout, + Code: 408, + })), + expected: true, + }, + { + name: "Wrapped forbidden error", + err: fmt.Errorf("discovery failed: %w", newStatusError(metav1.Status{ + Reason: metav1.StatusReasonForbidden, + Code: 403, + })), + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsRetryableDiscoveryError(tt.err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestIsRetryableDiscoveryError_RetryBehaviorMatrix(t *testing.T) { + // This test documents the expected retry behavior for different error types + // It serves as documentation for operators and developers + + type testCase struct { + name string + err error + shouldRetry bool + expectedAction string + } + + tests := []testCase{ + // Transient errors - SHOULD RETRY + { + name: "API Server overloaded (429)", + err: newStatusError(metav1.Status{Reason: metav1.StatusReasonTooManyRequests, Code: 429}), + shouldRetry: true, + expectedAction: "Wait and retry - server is rate limiting", + }, + { + name: "API Server timeout (408)", + err: newStatusError(metav1.Status{Reason: metav1.StatusReasonTimeout, Code: 408}), + shouldRetry: true, + expectedAction: "Retry - request timed out", + }, + { + name: "Service unavailable (503)", + err: newStatusError(metav1.Status{Reason: metav1.StatusReasonServiceUnavailable, Code: 503}), + shouldRetry: true, + expectedAction: "Retry - API server temporarily unavailable", + }, + { + name: "Internal server error (500)", + err: 
newStatusError(metav1.Status{Reason: metav1.StatusReasonInternalError, Code: 500}), + shouldRetry: true, + expectedAction: "Retry - transient server error", + }, + { + name: "Network connection refused", + err: syscall.ECONNREFUSED, + shouldRetry: true, + expectedAction: "Retry - network connectivity issue", + }, + + // Fatal errors - SHOULD NOT RETRY + { + name: "Permission denied (403)", + err: newStatusError(metav1.Status{Reason: metav1.StatusReasonForbidden, Code: 403}), + shouldRetry: false, + expectedAction: "FAIL FAST - fix RBAC permissions", + }, + { + name: "Authentication failed (401)", + err: newStatusError(metav1.Status{Reason: metav1.StatusReasonUnauthorized, Code: 401}), + shouldRetry: false, + expectedAction: "FAIL FAST - fix authentication", + }, + { + name: "Invalid request (400)", + err: newStatusError(metav1.Status{Reason: metav1.StatusReasonBadRequest, Code: 400}), + shouldRetry: false, + expectedAction: "FAIL FAST - fix request format", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsRetryableDiscoveryError(tt.err) + assert.Equal(t, tt.shouldRetry, result, + "Error: %s\nExpected action: %s", tt.name, tt.expectedAction) + }) + } +} + +// customError is a test helper for unknown error types +type customError struct { + msg string +} + +func (e *customError) Error() string { + return e.msg +} + +// TestK8sOperationError tests the K8sOperationError type +func TestK8sOperationError(t *testing.T) { + t.Run("error with namespace", func(t *testing.T) { + err := &K8sOperationError{ + Operation: "create", + Resource: "my-pod", + Kind: "Pod", + Namespace: "default", + Message: "already exists", + } + + assert.Contains(t, err.Error(), "create") + assert.Contains(t, err.Error(), "Pod") + assert.Contains(t, err.Error(), "my-pod") + assert.Contains(t, err.Error(), "default") + assert.Contains(t, err.Error(), "already exists") + }) + + t.Run("error without namespace", func(t *testing.T) { + err := &K8sOperationError{ 
+ Operation: "delete", + Resource: "my-namespace", + Kind: "Namespace", + Namespace: "", + Message: "not found", + } + + errStr := err.Error() + assert.Contains(t, errStr, "delete") + assert.Contains(t, errStr, "Namespace") + assert.NotContains(t, errStr, "(namespace:") + }) + + t.Run("unwrap underlying error", func(t *testing.T) { + underlyingErr := errors.New("connection refused") + err := &K8sOperationError{ + Operation: "get", + Resource: "my-pod", + Kind: "Pod", + Message: "failed", + Err: underlyingErr, + } + + assert.True(t, errors.Is(err, underlyingErr)) + }) +} + +func TestIsK8sOperationError(t *testing.T) { + t.Run("returns true for K8sOperationError", func(t *testing.T) { + err := &K8sOperationError{ + Operation: "create", + Resource: "test", + Kind: "Pod", + Message: "failed", + } + + k8sErr, ok := IsK8sOperationError(err) + assert.True(t, ok) + assert.NotNil(t, k8sErr) + assert.Equal(t, "create", k8sErr.Operation) + }) + + t.Run("returns true for wrapped K8sOperationError", func(t *testing.T) { + innerErr := &K8sOperationError{ + Operation: "update", + Resource: "test", + Kind: "ConfigMap", + Message: "conflict", + } + wrappedErr := fmt.Errorf("operation failed: %w", innerErr) + + k8sErr, ok := IsK8sOperationError(wrappedErr) + assert.True(t, ok) + assert.NotNil(t, k8sErr) + assert.Equal(t, "update", k8sErr.Operation) + }) + + t.Run("returns false for other errors", func(t *testing.T) { + err := errors.New("some error") + + k8sErr, ok := IsK8sOperationError(err) + assert.False(t, ok) + assert.Nil(t, k8sErr) + }) +} + diff --git a/pkg/errors/network_error.go b/pkg/errors/network_error.go new file mode 100644 index 0000000..89ac8ef --- /dev/null +++ b/pkg/errors/network_error.go @@ -0,0 +1,76 @@ +package errors + +import ( + "errors" + "net" + "syscall" + + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +// IsNetworkError checks if the error is a network-level error (connection issues, DNS, etc.) 
+// Uses syscall error codes and k8s.io/apimachinery/pkg/util/net for stable, universal detection. +// +// Detected errors include: +// - Connection refused (ECONNREFUSED) +// - Connection reset (ECONNRESET) +// - Connection timed out (ETIMEDOUT) +// - Network unreachable (ENETUNREACH) +// - No route to host (EHOSTUNREACH) +// - Connection aborted (ECONNABORTED) +// - Broken pipe (EPIPE) +// - EOF and connection closure errors +// - Timeout errors (net.Error.Timeout()) +func IsNetworkError(err error) bool { + if err == nil { + return false + } + + // Use k8s.io/apimachinery/pkg/util/net utilities for common network errors + // These use syscall.Errno under the hood for stable detection + if utilnet.IsConnectionRefused(err) { + return true + } + if utilnet.IsConnectionReset(err) { + return true + } + if utilnet.IsTimeout(err) { + return true + } + if utilnet.IsProbableEOF(err) { + return true + } + + // Check for additional syscall errors not covered by utilnet + var errno syscall.Errno + if errors.As(err, &errno) { + switch errno { + case syscall.ETIMEDOUT: // Connection timed out + return true + case syscall.ENETUNREACH: // Network is unreachable + return true + case syscall.EHOSTUNREACH: // No route to host + return true + case syscall.ECONNABORTED: // Connection aborted + return true + case syscall.EPIPE: // Broken pipe + return true + } + } + + // Check for net.OpError which wraps network operation failures + var opErr *net.OpError + if errors.As(err, &opErr) { + // Recursively check the underlying error + return IsNetworkError(opErr.Err) + } + + // Check for net.Error interface (includes custom network errors) + var netErr net.Error + if errors.As(err, &netErr) { + return netErr.Timeout() + } + + return false +} + diff --git a/pkg/errors/network_error_test.go b/pkg/errors/network_error_test.go new file mode 100644 index 0000000..0173acb --- /dev/null +++ b/pkg/errors/network_error_test.go @@ -0,0 +1,335 @@ +package errors + +import ( + "errors" + "fmt" + "io" 
+ "net" + "net/url" + "os" + "syscall" + "testing" + + "github.com/stretchr/testify/assert" +) + +// mockNetError implements net.Error for testing +type mockNetError struct { + timeout bool + temporary bool + msg string +} + +func (e *mockNetError) Error() string { return e.msg } +func (e *mockNetError) Timeout() bool { return e.timeout } +func (e *mockNetError) Temporary() bool { return e.temporary } + +func TestIsNetworkError_Nil(t *testing.T) { + assert.False(t, IsNetworkError(nil)) +} + +func TestIsNetworkError_SyscallErrors(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "ECONNREFUSED - connection refused", + err: syscall.ECONNREFUSED, + expected: true, + }, + { + name: "ECONNRESET - connection reset", + err: syscall.ECONNRESET, + expected: true, + }, + { + name: "ETIMEDOUT - connection timed out", + err: syscall.ETIMEDOUT, + expected: true, + }, + { + name: "ENETUNREACH - network unreachable", + err: syscall.ENETUNREACH, + expected: true, + }, + { + name: "EHOSTUNREACH - no route to host", + err: syscall.EHOSTUNREACH, + expected: true, + }, + { + name: "ECONNABORTED - connection aborted", + err: syscall.ECONNABORTED, + expected: true, + }, + { + name: "EPIPE - broken pipe", + err: syscall.EPIPE, + expected: true, + }, + { + name: "ENOENT - not a network error", + err: syscall.ENOENT, + expected: false, + }, + { + name: "EACCES - not a network error", + err: syscall.EACCES, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsNetworkError(tt.err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestIsNetworkError_WrappedSyscallErrors(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "wrapped ECONNREFUSED", + err: fmt.Errorf("operation failed: %w", syscall.ECONNREFUSED), + expected: true, + }, + { + name: "double wrapped ECONNRESET", + err: fmt.Errorf("outer: %w", fmt.Errorf("inner: %w", 
syscall.ECONNRESET)), + expected: true, + }, + { + name: "os.SyscallError wrapping ETIMEDOUT", + err: &os.SyscallError{Syscall: "connect", Err: syscall.ETIMEDOUT}, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsNetworkError(tt.err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestIsNetworkError_NetOpError(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "OpError with ECONNREFUSED", + err: &net.OpError{ + Op: "dial", + Net: "tcp", + Err: &os.SyscallError{Syscall: "connect", Err: syscall.ECONNREFUSED}, + }, + expected: true, + }, + { + name: "OpError with ECONNRESET", + err: &net.OpError{ + Op: "read", + Net: "tcp", + Err: &os.SyscallError{Syscall: "read", Err: syscall.ECONNRESET}, + }, + expected: true, + }, + { + name: "OpError with timeout", + err: &net.OpError{ + Op: "dial", + Net: "tcp", + Err: &mockNetError{timeout: true, msg: "i/o timeout"}, + }, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsNetworkError(tt.err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestIsNetworkError_URLError(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "URL error with ECONNREFUSED", + err: &url.Error{ + Op: "Get", + URL: "http://localhost:9999", + Err: &net.OpError{ + Op: "dial", + Net: "tcp", + Err: &os.SyscallError{Syscall: "connect", Err: syscall.ECONNREFUSED}, + }, + }, + expected: true, + }, + { + name: "URL error with timeout", + err: &url.Error{ + Op: "Get", + URL: "http://example.com", + Err: &mockNetError{timeout: true, msg: "timeout"}, + }, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsNetworkError(tt.err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestIsNetworkError_TimeoutErrors(t *testing.T) { + tests := []struct { + name string + err error + 
expected bool + }{ + { + name: "net.Error with timeout", + err: &mockNetError{timeout: true, msg: "connection timeout"}, + expected: true, + }, + { + name: "net.Error without timeout", + err: &mockNetError{timeout: false, msg: "some error"}, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsNetworkError(tt.err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestIsNetworkError_EOFErrors(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + { + name: "io.EOF", + err: io.EOF, + expected: true, + }, + { + name: "io.ErrUnexpectedEOF", + err: io.ErrUnexpectedEOF, + expected: true, + }, + { + // Note: utilnet.IsProbableEOF checks error message, not wrapped errors + // Wrapped EOF is not detected by utilnet as it checks the error directly + name: "wrapped io.EOF - not detected by utilnet", + err: fmt.Errorf("read failed: %w", io.EOF), + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsNetworkError(tt.err) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestIsNetworkError_NonNetworkErrors(t *testing.T) { + tests := []struct { + name string + err error + }{ + { + name: "simple error", + err: errors.New("some error"), + }, + { + name: "file not found", + err: os.ErrNotExist, + }, + { + name: "permission denied error", + err: os.ErrPermission, + }, + { + name: "custom error", + err: fmt.Errorf("custom error: %s", "details"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.False(t, IsNetworkError(tt.err), "Expected %v to NOT be a network error", tt.err) + }) + } +} + +func TestIsNetworkError_RealWorldScenarios(t *testing.T) { + t.Run("simulated connection refused", func(t *testing.T) { + // This simulates what happens when connecting to a closed port + err := &net.OpError{ + Op: "dial", + Net: "tcp", + Addr: &net.TCPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 
59999, + }, + Err: &os.SyscallError{ + Syscall: "connect", + Err: syscall.ECONNREFUSED, + }, + } + assert.True(t, IsNetworkError(err)) + }) + + t.Run("simulated connection reset by peer", func(t *testing.T) { + err := &net.OpError{ + Op: "read", + Net: "tcp", + Err: &os.SyscallError{ + Syscall: "read", + Err: syscall.ECONNRESET, + }, + } + assert.True(t, IsNetworkError(err)) + }) + + t.Run("simulated DNS lookup failure", func(t *testing.T) { + // DNS errors are typically wrapped in net.DNSError + err := &net.DNSError{ + Err: "no such host", + Name: "nonexistent.invalid", + IsNotFound: true, + } + // DNS errors are not timeout errors, so should return false + // unless they have a specific network cause + assert.False(t, IsNetworkError(err)) + }) +} + diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index eeec0b8..aa688ff 100644 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -57,8 +57,8 @@ func (l *logger) prepareLogPrefix(message string, extra extra) string { prefix = fmt.Sprintf("[cluster_id=%s]%s", clusterID, prefix) } - if opid, ok := l.context.Value(OpIDKey).(string); ok { - prefix = fmt.Sprintf("[opid=%s]%s", opid, prefix) + if evtid, ok := l.context.Value(EvtIDKey).(string); ok { + prefix = fmt.Sprintf("[%s=%s]%s", EvtIDKey, evtid, prefix) } var args []string @@ -89,18 +89,30 @@ func (l *logger) prepareLogPrefixf(format string, args ...interface{}) string { prefix = fmt.Sprintf("[cluster_id=%s]%s", clusterID, prefix) } - if opid, ok := l.context.Value(OpIDKey).(string); ok { - prefix = fmt.Sprintf("[opid=%s]%s", opid, prefix) + if evtid, ok := l.context.Value(EvtIDKey).(string); ok { + prefix = fmt.Sprintf("[%s=%s]%s", EvtIDKey, evtid, prefix) } return fmt.Sprintf("%s%s", prefix, orig) } +// copyExtra creates a deep copy of the extra map to avoid shared state bugs +func copyExtra(e extra) extra { + if e == nil { + return make(extra) + } + newExtra := make(extra, len(e)) + for k, v := range e { + newExtra[k] = v + } + return newExtra +} + 
func (l *logger) V(level int32) Logger { return &logger{ context: l.context, level: level, - extra: l.extra, + extra: copyExtra(l.extra), } } @@ -151,6 +163,23 @@ const ( AdapterIDKey contextKey = "adapter_id" // ClusterIDKey is the context key for cluster ID ClusterIDKey contextKey = "cluster_id" - // OpIDKey is the context key for operation ID - OpIDKey contextKey = "opid" + // EvtIDKey is the context key for event ID + EvtIDKey contextKey = "evt_id" ) + +// WithEventID wraps a logger to add event ID to all log messages as evtid. +// This works with any Logger implementation (including test loggers). +func WithEventID(log Logger, eventID string) Logger { + // If the logger is our internal logger type, create a new one with updated context + if l, ok := log.(*logger); ok { + ctx := context.WithValue(l.context, EvtIDKey, eventID) + return &logger{ + context: ctx, + level: l.level, + extra: copyExtra(l.extra), + } + } + // For other logger implementations (like test loggers), return as-is + // They should extract event ID from context if needed + return log +} diff --git a/pkg/logger/logger_test.go b/pkg/logger/logger_test.go index 3bf6908..f652eef 100644 --- a/pkg/logger/logger_test.go +++ b/pkg/logger/logger_test.go @@ -164,12 +164,12 @@ func TestLoggerContextValues(t *testing.T) { expectedValue: "cluster-1", }, { - name: "context_with_opid", + name: "context_with_evtid", ctxSetup: func() context.Context { - return context.WithValue(context.Background(), OpIDKey, "op-12345") + return context.WithValue(context.Background(), EvtIDKey, "evt-12345") }, - expectedKey: OpIDKey, - expectedValue: "op-12345", + expectedKey: EvtIDKey, + expectedValue: "evt-12345", }, } @@ -287,9 +287,9 @@ func TestLoggerConstants(t *testing.T) { expected: "cluster_id", }, { - name: "OpIDKey", - constant: OpIDKey, - expected: "opid", + name: "EvtIDKey", + constant: EvtIDKey, + expected: "evt_id", }, } diff --git a/test/integration/config-loader/config_criteria_integration_test.go 
b/test/integration/config-loader/config_criteria_integration_test.go index 1e48b84..199ea05 100644 --- a/test/integration/config-loader/config_criteria_integration_test.go +++ b/test/integration/config-loader/config_criteria_integration_test.go @@ -60,7 +60,7 @@ func TestConfigLoadAndCriteriaEvaluation(t *testing.T) { }, }) - evaluator := criteria.NewEvaluator(ctx) + evaluator := criteria.NewEvaluator(ctx, nil) t.Run("evaluate precondition conditions from config", func(t *testing.T) { // Find the clusterStatus precondition @@ -147,7 +147,7 @@ func TestConfigConditionsToCEL(t *testing.T) { ctx.Set("cloudProvider", "aws") ctx.Set("vpcId", "vpc-12345") - evaluator := criteria.NewEvaluator(ctx) + evaluator := criteria.NewEvaluator(ctx, nil) conditions := make([]criteria.ConditionDef, len(precond.Conditions)) for i, cond := range precond.Conditions { @@ -180,7 +180,7 @@ func TestConfigWithFailingPreconditions(t *testing.T) { ctx.Set("cloudProvider", "aws") ctx.Set("vpcId", "vpc-12345") - evaluator := criteria.NewEvaluator(ctx) + evaluator := criteria.NewEvaluator(ctx, nil) conditions := make([]criteria.ConditionDef, len(precond.Conditions)) for i, cond := range precond.Conditions { @@ -203,7 +203,7 @@ func TestConfigWithFailingPreconditions(t *testing.T) { ctx.Set("cloudProvider", "onprem") // Not in allowed list ctx.Set("vpcId", "vpc-12345") - evaluator := criteria.NewEvaluator(ctx) + evaluator := criteria.NewEvaluator(ctx, nil) conditions := make([]criteria.ConditionDef, len(precond.Conditions)) for i, cond := range precond.Conditions { @@ -225,7 +225,7 @@ func TestConfigWithFailingPreconditions(t *testing.T) { ctx.Set("cloudProvider", "aws") // vpcId not set - should fail exists check - evaluator := criteria.NewEvaluator(ctx) + evaluator := criteria.NewEvaluator(ctx, nil) // Just check the vpcId exists condition result := evaluator.EvaluateConditionSafe("vpcId", criteria.OperatorExists, true) @@ -312,7 +312,7 @@ func TestConfigPostProcessingEvaluation(t *testing.T) { 
}, }) - evaluator := criteria.NewEvaluator(ctx) + evaluator := criteria.NewEvaluator(ctx, nil) // Test accessing nested K8s resource data t.Run("access namespace status", func(t *testing.T) { @@ -374,7 +374,7 @@ func TestConfigNullSafetyWithMissingResources(t *testing.T) { "clusterController": nil, // Not created yet }) - evaluator := criteria.NewEvaluator(ctx) + evaluator := criteria.NewEvaluator(ctx, nil) // Safe access to missing resource value := evaluator.GetFieldSafe("resources.clusterController.status.readyReplicas") @@ -404,7 +404,7 @@ func TestConfigNullSafetyWithMissingResources(t *testing.T) { }, }) - evaluator := criteria.NewEvaluator(ctx) + evaluator := criteria.NewEvaluator(ctx, nil) // Should handle null status gracefully value := evaluator.GetFieldOrDefault("resources.clusterController.status.readyReplicas", -1) diff --git a/test/integration/executor/executor_integration_test.go b/test/integration/executor/executor_integration_test.go new file mode 100644 index 0000000..1ed435a --- /dev/null +++ b/test/integration/executor/executor_integration_test.go @@ -0,0 +1,1403 @@ +package executor_integration_test + +import ( + "context" + _ "embed" + "encoding/json" + "fmt" + "net/http" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-adapter/test/integration/testutil" + "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v3" +) + +// getK8sEnvForTest returns the K8s environment for integration testing. +// Uses real K8s from testcontainers. Skips tests if testcontainers are unavailable. 
+func getK8sEnvForTest(t *testing.T) *K8sTestEnv { + t.Helper() + // Use shared K8s environment from testcontainers + if sharedK8sEnv != nil { + return sharedK8sEnv + } + // Integration tests require real testcontainers - skip if unavailable + t.Skip("Integration tests require testcontainers (set INTEGRATION_ENVTEST_IMAGE)") + return nil +} + +//go:embed testdata/test-adapter-config.yaml +var testAdapterConfigYAML []byte + +// createTestEvent creates a CloudEvent for testing +func createTestEvent(clusterId, resourceId string) *event.Event { + evt := event.New() + evt.SetID("test-event-" + clusterId) + evt.SetType("com.redhat.hyperfleet.cluster.provision") + evt.SetSource("test") + evt.SetTime(time.Now()) + + eventData := map[string]interface{}{ + "cluster_id": clusterId, + "resource_id": resourceId, + "resource_type": "cluster", + "generation": "gen-001", + "href": "/api/v1/clusters/" + clusterId, + } + eventDataBytes, _ := json.Marshal(eventData) + _ = evt.SetData(event.ApplicationJSON, eventDataBytes) + + return &evt +} + +// createTestConfig creates an AdapterConfig for testing +// createTestConfig loads the test adapter configuration from embedded YAML +func createTestConfig(apiBaseURL string) *config_loader.AdapterConfig { + var config config_loader.AdapterConfig + + if err := yaml.Unmarshal(testAdapterConfigYAML, &config); err != nil { + panic(fmt.Sprintf("failed to parse test config: %v", err)) + } + + // The apiBaseURL parameter is kept for compatibility but not needed + // since the config uses env.HYPERFLEET_API_BASE_URL which is set via t.Setenv + + return &config +} + +func TestExecutor_FullFlow_Success(t *testing.T) { + // Setup mock API server + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + // Set environment variables + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Get K8s environment from testcontainers + k8sEnv := getK8sEnvForTest(t) + + // Create config and executor + 
config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient( + hyperfleet_api.WithTimeout(10*time.Second), + hyperfleet_api.WithRetryAttempts(1), + ) + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(k8sEnv.Log). + WithK8sClient(k8sEnv.Client). + Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Create test event + evt := createTestEvent("cluster-123", "resource-456") + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + result := exec.Execute(ctx, evt) + + // Verify result + if result.Status != executor.StatusSuccess { + t.Errorf("Expected success status, got %s: %v", result.Status, result.Error) + } + + if result.EventID != evt.ID() { + t.Errorf("Expected event ID %s, got %s", evt.ID(), result.EventID) + } + + // Verify params were extracted + if result.Params["clusterId"] != "cluster-123" { + t.Errorf("Expected clusterId 'cluster-123', got '%v'", result.Params["clusterId"]) + } + + // Verify preconditions passed + if len(result.PreconditionResults) != 1 { + t.Errorf("Expected 1 precondition result, got %d", len(result.PreconditionResults)) + } else { + precondResult := result.PreconditionResults[0] + if !precondResult.Matched { + t.Errorf("Expected precondition to match, but it didn't") + } + if precondResult.CapturedFields["clusterName"] != "test-cluster" { + t.Errorf("Expected captured clusterName 'test-cluster', got '%v'", precondResult.CapturedFields["clusterName"]) + } + } + + // Verify post actions executed + if len(result.PostActionResults) != 1 { + t.Errorf("Expected 1 post action result, got %d", len(result.PostActionResults)) + } else { + postResult := result.PostActionResults[0] + if postResult.Status != executor.StatusSuccess { + t.Errorf("Expected post action success, got %s: %v", postResult.Status, postResult.Error) + } + if !postResult.APICallMade { + t.Error("Expected API 
call to be made in post action") + } + } + + // Verify API calls were made + requests := mockAPI.GetRequests() + if len(requests) < 2 { + t.Errorf("Expected at least 2 API requests (precondition + post action), got %d", len(requests)) + } + + // Verify status was posted with correct template expression values + statusResponses := mockAPI.GetStatusResponses() + if len(statusResponses) != 1 { + t.Errorf("Expected 1 status response, got %d", len(statusResponses)) + } else { + status := statusResponses[0] + t.Logf("Status payload received: %+v", status) + + // Check that status contains expected fields with correct values from template + if conditions, ok := status["conditions"].(map[string]interface{}); ok { + if health, ok := conditions["health"].(map[string]interface{}); ok { + // Status should be true (adapter.executionStatus == "success") + if health["status"] != true { + t.Errorf("Expected health.status to be true, got %v", health["status"]) + } + + // Reason should be "Healthy" (default since no adapter.errorReason) + if reason, ok := health["reason"].(string); ok { + if reason != "Healthy" { + t.Errorf("Expected health.reason to be 'Healthy', got '%s'", reason) + } + } else { + t.Error("Expected health.reason to be a string") + } + + // Message should be default success message (no adapter.errorMessage) + if message, ok := health["message"].(string); ok { + if message != "All adapter operations completed successfully" { + t.Errorf("Expected health.message to be default success message, got '%s'", message) + } + } else { + t.Error("Expected health.message to be a string") + } + } else { + t.Error("Expected health condition in status") + } + } else { + t.Error("Expected conditions in status payload") + } + } + + t.Logf("Execution completed successfully") +} + +func TestExecutor_PreconditionNotMet(t *testing.T) { + // Setup mock API server that returns a cluster in "Terminating" phase + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + // Set 
cluster to a phase that doesn't match conditions + mockAPI.SetClusterResponse(map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-cluster", + }, + "spec": map[string]interface{}{ + "region": "us-east-1", + "provider": "aws", + "vpc_id": "vpc-12345", + }, + "status": map[string]interface{}{ + "phase": "Terminating", // This won't match the condition + }, + }) + + // Set environment variables + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Get K8s environment from testcontainers + k8sEnv := getK8sEnvForTest(t) + + // Create config and executor + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(k8sEnv.Log). + WithK8sClient(k8sEnv.Client). + Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Execute + evt := createTestEvent("cluster-456", "resource-789") + ctx := context.Background() + result := exec.Execute(ctx, evt) + + // Verify result - should be success with resources skipped (precondition not met is valid outcome) + if result.Status != executor.StatusSuccess { + t.Errorf("Expected success status (precondition not met is valid), got %s", result.Status) + } + if !result.ResourcesSkipped { + t.Error("Expected ResourcesSkipped to be true") + } + + // Verify precondition was not matched + if len(result.PreconditionResults) != 1 { + t.Errorf("Expected 1 precondition result, got %d", len(result.PreconditionResults)) + } else { + if result.PreconditionResults[0].Matched { + t.Error("Expected precondition to NOT match") + } + } + + // Post actions should still execute (to report the error) + if len(result.PostActionResults) != 1 { + t.Errorf("Expected 1 post action result (error reporting), got %d", len(result.PostActionResults)) + } + + // Verify the status payload reflects skipped execution (not an error) + 
statusResponses := mockAPI.GetStatusResponses() + if len(statusResponses) == 1 { + status := statusResponses[0] + t.Logf("Status payload: %+v", status) + + if conditions, ok := status["conditions"].(map[string]interface{}); ok { + if health, ok := conditions["health"].(map[string]interface{}); ok { + // Health status should be false because adapter.executionStatus != "success" + if health["status"] != false { + t.Errorf("Expected health.status to be false for precondition not met, got %v", health["status"]) + } + + // Reason should contain error (from adapter.errorReason, not "Healthy") + if reason, ok := health["reason"].(string); ok { + if reason == "Healthy" { + t.Error("Expected health.reason to indicate precondition not met, got 'Healthy'") + } + t.Logf("Health reason: %s", reason) + } + + // Message should contain error explanation (from adapter.errorMessage) + if message, ok := health["message"].(string); ok { + if message == "All adapter operations completed successfully" { + t.Error("Expected health.message to explain precondition not met, got default success message") + } + t.Logf("Health message: %s", message) + } + } + } + } + + // Verify execution context shows adapter is healthy (executionStatus = "success", resources skipped) + if result.ExecutionContext != nil { + if result.ExecutionContext.Adapter.ExecutionStatus != string(executor.StatusSuccess) { + t.Errorf("Expected adapter.executionStatus to be 'success', got '%s'", + result.ExecutionContext.Adapter.ExecutionStatus) + } + // No executionError should be set - precondition not matching is not an error + if result.ExecutionContext.Adapter.ExecutionError != nil { + t.Errorf("Expected no executionError for precondition not met, got %+v", + result.ExecutionContext.Adapter.ExecutionError) + } + } + + t.Logf("Execution completed with expected precondition skip") +} + +func TestExecutor_PreconditionAPIFailure(t *testing.T) { + // Setup mock API server that fails precondition API call + mockAPI := 
testutil.NewMockAPIServer(t) + defer mockAPI.Close() + mockAPI.SetFailPrecondition(true) // API will return 404 + + // Set environment variables + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Get K8s environment from testcontainers + k8sEnv := getK8sEnvForTest(t) + + // Create config and executor + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient( + hyperfleet_api.WithRetryAttempts(1), + ) + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(k8sEnv.Log). + WithK8sClient(k8sEnv.Client). + Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Execute + evt := createTestEvent("cluster-notfound", "resource-000") + ctx := context.Background() + result := exec.Execute(ctx, evt) + + // Verify result - should be failed (API error) + if result.Status != executor.StatusFailed { + t.Errorf("Expected failed status, got %s", result.Status) + } + + if result.Error == nil { + t.Error("Expected error to be set") + } + + // Verify resources were not processed due to precondition failure + if len(result.ResourceResults) > 0 { + t.Errorf("Expected no resources to be processed on API error, got %d", len(result.ResourceResults)) + } + + // Verify post actions still executed to report the error + if len(result.PostActionResults) != 1 { + t.Errorf("Expected 1 post action result (error reporting), got %d", len(result.PostActionResults)) + } + + // Verify status payload contains adapter error fields + statusResponses := mockAPI.GetStatusResponses() + if len(statusResponses) != 1 { + t.Errorf("Expected 1 status response, got %d", len(statusResponses)) + } else { + status := statusResponses[0] + t.Logf("Error status payload: %+v", status) + + // Verify health condition with adapter.xxx fields + if conditions, ok := status["conditions"].(map[string]interface{}); ok { + if health, ok := 
conditions["health"].(map[string]interface{}); ok { + // Status should be false because adapter.executionStatus != "success" + if health["status"] != false { + t.Errorf("Expected health.status to be false for API error, got %v", health["status"]) + } + + // Reason should contain error reason (not "Healthy") + if reason, ok := health["reason"].(string); ok { + if reason == "Healthy" { + t.Error("Expected health.reason to contain error, got 'Healthy'") + } + t.Logf("Health reason: %s", reason) + } else { + t.Error("Expected health.reason to be a string") + } + + // Message should contain error message (not default success message) + if message, ok := health["message"].(string); ok { + if message == "All adapter operations completed successfully" { + t.Error("Expected health.message to contain error, got default success message") + } + t.Logf("Health message: %s", message) + } else { + t.Error("Expected health.message to be a string") + } + } else { + t.Error("Expected health condition in status") + } + } else { + t.Error("Expected conditions in status payload") + } + } + + t.Logf("Execution failed as expected: %v", result.Error) +} + +func TestExecutor_CELExpressionEvaluation(t *testing.T) { + // Setup mock API server + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + // Set environment variables + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Create config with CEL expression precondition + config := createTestConfig(mockAPI.URL()) + config.Spec.Preconditions = []config_loader.Precondition{ + { + Name: "clusterStatus", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", + Timeout: "5s", + }, + Capture: []config_loader.CaptureField{ + {Name: "clusterName", Field: "metadata.name"}, + {Name: "clusterPhase", Field: "status.phase"}, + {Name: "nodeCount", Field: "spec.node_count"}, + }, + // Use CEL 
expression instead of structured conditions + Expression: `clusterPhase == "Ready" && nodeCount >= 3`, + }, + } + + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). + Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Execute + evt := createTestEvent("cluster-cel-test", "resource-cel") + ctx := context.Background() + result := exec.Execute(ctx, evt) + + // Verify CEL evaluation passed + if result.Status != executor.StatusSuccess { + t.Errorf("Expected success status, got %s: %v", result.Status, result.Error) + } + + if len(result.PreconditionResults) != 1 { + t.Fatalf("Expected 1 precondition result, got %d", len(result.PreconditionResults)) + } + + precondResult := result.PreconditionResults[0] + if !precondResult.Matched { + t.Error("Expected CEL expression to evaluate to true") + } + + // Verify CEL result was recorded + if precondResult.CELResult == nil { + t.Error("Expected CEL result to be recorded") + } else { + t.Logf("CEL expression result: matched=%v, value=%v", precondResult.CELResult.Matched, precondResult.CELResult.Value) + } +} + +func TestExecutor_MultipleMessages(t *testing.T) { + // Test that multiple messages can be processed with isolated contexts + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). 
+ Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Process multiple messages + clusterIds := []string{"cluster-a", "cluster-b", "cluster-c"} + results := make([]*executor.ExecutionResult, len(clusterIds)) + + for i, clusterId := range clusterIds { + evt := createTestEvent(clusterId, fmt.Sprintf("resource-%d", i)) + results[i] = exec.Execute(context.Background(), evt) + } + + // Verify all succeeded with isolated params + for i, result := range results { + if result.Status != executor.StatusSuccess { + t.Errorf("Message %d failed: %v", i, result.Error) + continue + } + + // Verify each message had its own clusterId + expectedClusterId := clusterIds[i] + if result.Params["clusterId"] != expectedClusterId { + t.Errorf("Message %d: expected clusterId '%s', got '%v'", i, expectedClusterId, result.Params["clusterId"]) + } + } + + // Verify we got separate status posts for each message + statusResponses := mockAPI.GetStatusResponses() + if len(statusResponses) != len(clusterIds) { + t.Errorf("Expected %d status responses, got %d", len(clusterIds), len(statusResponses)) + } + + t.Logf("Successfully processed %d messages with isolated contexts", len(clusterIds)) +} + +func TestExecutor_Handler_Integration(t *testing.T) { + // Test the CreateHandler function that would be used with broker + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). 
+ Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Get the handler function + handler := exec.CreateHandler() + + // Simulate broker calling the handler + evt := createTestEvent("cluster-handler-test", "resource-handler") + ctx := context.Background() + + err = handler(ctx, evt) + + // Handler should return nil on success + if err != nil { + t.Errorf("Handler returned error: %v", err) + } + + // Verify API calls were made + requests := mockAPI.GetRequests() + if len(requests) < 2 { + t.Errorf("Expected at least 2 API requests, got %d", len(requests)) + } + + t.Log("Handler integration test passed") +} + +func TestExecutor_Handler_PreconditionNotMet_ReturnsNil(t *testing.T) { + // When preconditions aren't met, handler should return nil (not an error) + // because it's expected behavior, not a system failure + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + mockAPI.SetClusterResponse(map[string]interface{}{ + "metadata": map[string]interface{}{"name": "test"}, + "spec": map[string]interface{}{"region": "us-east-1", "provider": "aws"}, + "status": map[string]interface{}{"phase": "Terminating"}, // Won't match + }) + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). 
+ Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + handler := exec.CreateHandler() + evt := createTestEvent("cluster-skip", "resource-skip") + + // Handler should return nil even when precondition not met + err = handler(context.Background(), evt) + if err != nil { + t.Errorf("Handler should return nil for precondition not met, got: %v", err) + } + + t.Log("Handler correctly returns nil for skipped execution") +} + +func TestExecutor_ContextCancellation(t *testing.T) { + // Test that context cancellation is respected + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). + Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Create already cancelled context + ctx, cancel := context.WithCancel(context.Background()) + cancel() // Cancel immediately + + evt := createTestEvent("cluster-cancelled", "resource-cancelled") + result := exec.Execute(ctx, evt) + + // Should fail due to context cancellation + // Note: The exact behavior depends on where cancellation is checked + t.Logf("Result with cancelled context: status=%s, error=%v", result.Status, result.Error) +} + +func TestExecutor_MissingRequiredParam(t *testing.T) { + // Test that missing required params cause failure + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + // Set env var initially so executor can be created + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). 
+ WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). + Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Unset the env var after executor creation + if err := os.Unsetenv("HYPERFLEET_API_BASE_URL"); err != nil { + t.Fatalf("Failed to unset env var: %v", err) + } + + evt := createTestEvent("cluster-missing-param", "resource-missing") + result := exec.Execute(context.Background(), evt) + + // Should fail during param extraction + if result.Status != executor.StatusFailed { + t.Errorf("Expected failed status for missing required param, got %s", result.Status) + } + + if result.Phase != executor.PhaseParamExtraction { + t.Errorf("Expected failure in param_extraction phase, got %s", result.Phase) + } + + // Post-actions DO NOT execute for param extraction failures + // We skip all phases to avoid processing invalid events + if len(result.PostActionResults) != 0 { + t.Errorf("Expected 0 post-actions for param extraction failure, got %d", len(result.PostActionResults)) + } + + // Preconditions should not execute + if len(result.PreconditionResults) != 0 { + t.Errorf("Expected 0 preconditions for param extraction failure, got %d", len(result.PreconditionResults)) + } + + // Resources should not execute + if len(result.ResourceResults) != 0 { + t.Errorf("Expected 0 resources for param extraction failure, got %d", len(result.ResourceResults)) + } + + // Test handler behavior: should ACK (not NACK) invalid events + handler := exec.CreateHandler() + err = handler(context.Background(), evt) + if err != nil { + t.Errorf("Handler should ACK (return nil) for param extraction failures, got error: %v", err) + } + + t.Logf("Correctly failed with missing required param and ACKed (not NACKed): %v", result.Error) +} + +// TestExecutor_InvalidEventJSON tests handling of malformed event data +func TestExecutor_InvalidEventJSON(t *testing.T) { + mockAPI := 
testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). + Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Create event with invalid JSON data + evt := event.New() + evt.SetID("invalid-event-123") + evt.SetType("com.redhat.hyperfleet.cluster.provision") + evt.SetSource("test") + + // Set malformed JSON data that can't be parsed + invalidJSON := []byte("this is not valid JSON {{{") + _ = evt.SetData(event.ApplicationJSON, invalidJSON) + + result := exec.Execute(context.Background(), &evt) + + // Should fail during param extraction (JSON parsing) + assert.Equal(t, executor.StatusFailed, result.Status, "Should fail with invalid JSON") + assert.Equal(t, executor.PhaseParamExtraction, result.Phase, "Should fail in param extraction phase") + assert.NotNil(t, result.Error, "Should have error set") + t.Logf("Invalid JSON error: %v", result.Error) + + // All phases should be skipped for invalid events + assert.Empty(t, result.PostActionResults, "Post-actions should not execute for invalid event") + assert.Empty(t, result.PreconditionResults, "Preconditions should not execute for invalid event") + assert.Empty(t, result.ResourceResults, "Resources should not execute for invalid event") + + // Test handler behavior: should ACK (not NACK) invalid events + handler := exec.CreateHandler() + err = handler(context.Background(), &evt) + assert.Nil(t, err, "Handler should ACK (return nil) for invalid events, not NACK") + + t.Log("Expected behavior: Invalid event is ACKed (not NACKed), all phases skipped") +} + +// TestExecutor_MissingEventFields tests handling of events missing required 
fields +func TestExecutor_MissingEventFields(t *testing.T) { + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). + Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Create event missing required field (cluster_id) + evt := event.New() + evt.SetID("missing-field-event") + evt.SetType("com.redhat.hyperfleet.cluster.provision") + evt.SetSource("test") + + eventData := map[string]interface{}{ + "resource_id": "resource-123", + // Missing cluster_id (required) + } + eventDataBytes, _ := json.Marshal(eventData) + _ = evt.SetData(event.ApplicationJSON, eventDataBytes) + + result := exec.Execute(context.Background(), &evt) + + // Should fail during param extraction (missing required param from event) + assert.Equal(t, executor.StatusFailed, result.Status, "Should fail with missing required field") + assert.Equal(t, executor.PhaseParamExtraction, result.Phase, "Should fail in param extraction") + assert.NotNil(t, result.Error) + assert.Contains(t, result.Error.Error(), "clusterId", "Error should mention missing clusterId") + t.Logf("Missing field error: %v", result.Error) + + // All phases should be skipped for events with missing required fields + assert.Empty(t, result.PostActionResults, "Post-actions should not execute for missing required field") + assert.Empty(t, result.PreconditionResults, "Preconditions should not execute for missing required field") + assert.Empty(t, result.ResourceResults, "Resources should not execute for missing required field") + + // Test handler behavior: should ACK (not NACK) events with missing required fields + handler := 
exec.CreateHandler() + err = handler(context.Background(), &evt) + assert.Nil(t, err, "Handler should ACK (return nil) for missing required fields, not NACK") + + t.Log("Expected behavior: Event with missing required field is ACKed (not NACKed), all phases skipped") +} + +// TestExecutor_LogAction tests log actions in preconditions and post-actions +func TestExecutor_LogAction(t *testing.T) { + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Create a custom logger that captures log messages + logCapture := &logCaptureLogger{t: t, messages: make([]string, 0)} + + // Create config with log actions in preconditions and post-actions + config := &config_loader.AdapterConfig{ + APIVersion: "hyperfleet.redhat.com/v1alpha1", + Kind: "AdapterConfig", + Metadata: config_loader.Metadata{ + Name: "log-test-adapter", + Namespace: "test", + }, + Spec: config_loader.AdapterConfigSpec{ + Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: "10s", RetryAttempts: 1, + }, + Params: []config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Default: "v1"}, + {Name: "clusterId", Source: "event.cluster_id", Required: true}, + {Name: "resourceId", Source: "event.resource_id", Required: true}, + }, + Preconditions: []config_loader.Precondition{ + { + // Log action only - no API call or conditions + Name: "logStart", + Log: &config_loader.LogAction{ + Message: "Starting processing for cluster {{ .clusterId }}", + Level: "info", + }, + }, + { + // Log action before API call + Name: "logBeforeAPICall", + Log: &config_loader.LogAction{ + Message: "About to check cluster status for {{ .clusterId }}", + Level: "debug", + }, + }, + { + Name: "checkCluster", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "{{ 
.hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", + }, + Capture: []config_loader.CaptureField{ + {Name: "clusterPhase", Field: "status.phase"}, + }, + Conditions: []config_loader.Condition{ + {Field: "clusterPhase", Operator: "equals", Value: "Ready"}, + }, + }, + }, + Post: &config_loader.PostConfig{ + PostActions: []config_loader.PostAction{ + { + // Log action in post-actions + Name: "logCompletion", + Log: &config_loader.LogAction{ + Message: "Completed processing cluster {{ .clusterId }} with resource {{ .resourceId }}", + Level: "info", + }, + }, + { + // Log with warning level + Name: "logWarning", + Log: &config_loader.LogAction{ + Message: "This is a warning for cluster {{ .clusterId }}", + Level: "warning", + }, + }, + }, + }, + }, + } + + apiClient, _ := hyperfleet_api.NewClient() + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(logCapture). + WithK8sClient(getK8sEnvForTest(t).Client). 
+ Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Execute + evt := createTestEvent("log-test-cluster", "log-test-resource") + result := exec.Execute(context.Background(), evt) + + // Should succeed + if result.Status != executor.StatusSuccess { + t.Fatalf("Expected success, got %s: %v", result.Status, result.Error) + } + + // Verify log messages were captured + t.Logf("Captured %d log messages", len(logCapture.messages)) + for i, msg := range logCapture.messages { + t.Logf(" [%d] %s", i, msg) + } + + // Check for expected log messages (with [config] prefix) + expectedLogs := []string{ + "[config] Starting processing for cluster log-test-cluster", + "[config] About to check cluster status for log-test-cluster", + "[config] Completed processing cluster log-test-cluster with resource log-test-resource", + "[config] This is a warning for cluster log-test-cluster", + } + + for _, expected := range expectedLogs { + found := false + for _, msg := range logCapture.messages { + if strings.Contains(msg, expected) { + found = true + break + } + } + if !found { + t.Errorf("Expected log message not found: %s", expected) + } + } + + // Verify preconditions executed (including log-only ones) + if len(result.PreconditionResults) != 3 { + t.Errorf("Expected 3 precondition results, got %d", len(result.PreconditionResults)) + } + + // Verify post actions executed + if len(result.PostActionResults) != 2 { + t.Errorf("Expected 2 post action results, got %d", len(result.PostActionResults)) + } + + t.Logf("Log action test completed successfully") +} + +// logCaptureLogger captures log messages for testing +type logCaptureLogger struct { + t *testing.T + messages []string + mu sync.Mutex +} + +func (l *logCaptureLogger) V(level int32) logger.Logger { return l } +func (l *logCaptureLogger) Extra(key string, value interface{}) logger.Logger { return l } + +func (l *logCaptureLogger) capture(level, format string, args ...interface{}) { + l.mu.Lock() + 
defer l.mu.Unlock() + // Format as "[LEVEL] message" and record it under the mutex; also echo to the test log. + msg := fmt.Sprintf("[%s] "+format, append([]interface{}{level}, args...)...) + l.messages = append(l.messages, msg) + l.t.Logf("%s", msg) +} + +func (l *logCaptureLogger) Infof(format string, args ...interface{}) { + l.capture("INFO", format, args...) +} +func (l *logCaptureLogger) Warningf(format string, args ...interface{}) { + l.capture("WARN", format, args...) +} +func (l *logCaptureLogger) Errorf(format string, args ...interface{}) { + l.capture("ERROR", format, args...) +} +// Non-format variants route through capture with "%s" so a '%' in the message is never misinterpreted as a verb. +func (l *logCaptureLogger) Info(message string) { l.capture("INFO", "%s", message) } +func (l *logCaptureLogger) Warning(message string) { l.capture("WARN", "%s", message) } +func (l *logCaptureLogger) Error(message string) { l.capture("ERROR", "%s", message) } +// Fatal aborts the test immediately rather than capturing. +func (l *logCaptureLogger) Fatal(message string) { l.t.Fatalf("[FATAL] %s", message) } + +// TestExecutor_PostActionAPIFailure tests handling of post action API failures (4xx/5xx responses) +func TestExecutor_PostActionAPIFailure(t *testing.T) { + // Setup mock API server that fails post action API calls + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + // Preconditions will succeed, but post action API call will fail with 500 + mockAPI.SetFailPostAction(true) + + // Set environment variables + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Create config and executor + config := createTestConfig(mockAPI.URL()) + apiClient, _ := hyperfleet_api.NewClient( + hyperfleet_api.WithRetryAttempts(1), + ) + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client).
+ Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Execute + evt := createTestEvent("cluster-post-fail", "resource-post-fail") + ctx := context.Background() + result := exec.Execute(ctx, evt) + + // Verify result - should be failed due to post action API error + assert.Equal(t, executor.StatusFailed, result.Status, "Expected failed status for post action API error") + assert.NotNil(t, result.Error, "Expected error to be set") + t.Logf("Post action API failure error: %v", result.Error) + + // Verify preconditions passed successfully + assert.Equal(t, 1, len(result.PreconditionResults), "Expected 1 precondition result") + if len(result.PreconditionResults) > 0 { + assert.True(t, result.PreconditionResults[0].Matched, "Expected precondition to match") + } + + // Verify post action was attempted and failed + assert.Equal(t, 1, len(result.PostActionResults), "Expected 1 post action result") + if len(result.PostActionResults) > 0 { + postResult := result.PostActionResults[0] + assert.Equal(t, executor.StatusFailed, postResult.Status, "Expected post action to fail") + assert.NotNil(t, postResult.Error, "Expected post action error to be set") + assert.True(t, postResult.APICallMade, "Expected API call to be made") + assert.Equal(t, http.StatusInternalServerError, postResult.HTTPStatus, "Expected 500 status code") + + // Verify error contains status code and response body + errStr := postResult.Error.Error() + assert.Contains(t, errStr, "500", "Error should contain status code") + assert.Contains(t, errStr, "Internal Server Error", "Error should contain status text") + // The response body should be included in the error + t.Logf("Post action error message: %s", errStr) + } + + // Verify ExecutionError was populated in execution context + assert.NotNil(t, result.ExecutionContext, "Expected execution context to be set") + if result.ExecutionContext != nil { + assert.NotNil(t, result.ExecutionContext.Adapter.ExecutionError, "Expected 
ExecutionError to be populated") + if result.ExecutionContext.Adapter.ExecutionError != nil { + execErr := result.ExecutionContext.Adapter.ExecutionError + assert.Equal(t, "post_actions", execErr.Phase, "Expected error in post_actions phase") + assert.Equal(t, "reportClusterStatus", execErr.Step, "Expected error in reportClusterStatus step") + assert.Contains(t, execErr.Message, "500", "Expected error message to contain 500 status code") + t.Logf("ExecutionError: phase=%s, step=%s, message=%s", + execErr.Phase, execErr.Step, execErr.Message) + } + } + + // Verify the phase is post_actions + assert.Equal(t, executor.PhasePostActions, result.Phase, "Expected failure in post_actions phase") + + // Verify precondition API was called, but status POST failed + requests := mockAPI.GetRequests() + assert.GreaterOrEqual(t, len(requests), 2, "Expected at least 2 API calls (GET cluster + POST status)") + + // Find the status POST request + var statusPostFound bool + for _, req := range requests { + if req.Method == http.MethodPost && strings.Contains(req.Path, "/status") { + statusPostFound = true + t.Logf("Status POST was attempted: %s %s", req.Method, req.Path) + } + } + assert.True(t, statusPostFound, "Expected status POST to be attempted") + + // No status should be successfully stored since POST failed + statusResponses := mockAPI.GetStatusResponses() + assert.Empty(t, statusResponses, "Expected no successful status responses due to API failure") + + t.Logf("Post action API failure test completed successfully") +} + +// TestExecutor_ExecutionError_CELAccess tests that adapter.executionError is accessible via CEL expressions +func TestExecutor_ExecutionError_CELAccess(t *testing.T) { + // Setup mock API server that fails precondition to trigger an error + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + mockAPI.SetFailPrecondition(true) // Will return 404 for cluster lookup + + // Set environment variables + t.Setenv("HYPERFLEET_API_BASE_URL", 
mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Create config with CEL expressions that access adapter.executionError + config := &config_loader.AdapterConfig{ + APIVersion: "hyperfleet.redhat.com/v1alpha1", + Kind: "AdapterConfig", + Metadata: config_loader.Metadata{ + Name: "executionError-cel-test", + Namespace: "test", + }, + Spec: config_loader.AdapterConfigSpec{ + Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: "10s", RetryAttempts: 1, RetryBackoff: "constant", + }, + Params: []config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Default: "v1"}, + {Name: "clusterId", Source: "event.cluster_id", Required: true}, + }, + Preconditions: []config_loader.Precondition{ + { + Name: "clusterStatus", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", + Timeout: "5s", + }, + Capture: []config_loader.CaptureField{ + {Name: "clusterPhase", Field: "status.phase"}, + }, + Conditions: []config_loader.Condition{ + {Field: "clusterPhase", Operator: "equals", Value: "Ready"}, + }, + }, + }, + Resources: []config_loader.Resource{}, + Post: &config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "errorReportPayload", + Build: map[string]interface{}{ + // Test accessing adapter.executionError fields via CEL + "hasError": map[string]interface{}{ + "expression": "has(adapter.executionError) && adapter.executionError != null", + }, + "errorPhase": map[string]interface{}{ + "expression": "has(adapter.executionError) && adapter.executionError != null ? adapter.executionError.phase : \"no_error\"", + }, + "errorStep": map[string]interface{}{ + "expression": "has(adapter.executionError) && adapter.executionError != null ? 
adapter.executionError.step : \"no_step\"", + }, + "errorMessage": map[string]interface{}{ + "expression": "has(adapter.executionError) && adapter.executionError != null ? adapter.executionError.message : \"no_message\"", + }, + // Also test that other adapter fields still work + "executionStatus": map[string]interface{}{ + "expression": "adapter.executionStatus", + }, + "errorReason": map[string]interface{}{ + "expression": "adapter.errorReason", + }, + "clusterId": map[string]interface{}{ + "value": "{{ .clusterId }}", + }, + }, + }, + }, + PostActions: []config_loader.PostAction{ + { + Name: "reportError", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/error-report", + Body: "{{ .errorReportPayload }}", + Timeout: "5s", + }, + }, + }, + }, + }, + } + + apiClient, _ := hyperfleet_api.NewClient(hyperfleet_api.WithRetryAttempts(1)) + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(getK8sEnvForTest(t).Log). + WithK8sClient(getK8sEnvForTest(t).Client). 
+ Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Execute - should fail due to precondition API error + evt := createTestEvent("cluster-cel-error-test", "resource-cel-error") + ctx := context.Background() + result := exec.Execute(ctx, evt) + + // Verify execution failed (due to precondition failure) + assert.Equal(t, executor.StatusFailed, result.Status, "Expected failed status") + assert.NotNil(t, result.Error, "Expected error to be set") + + // Verify post action was attempted (to report the error) + assert.Equal(t, 1, len(result.PostActionResults), "Expected 1 post action result") + if len(result.PostActionResults) > 0 { + postResult := result.PostActionResults[0] + // The post action itself may fail (mock server returns 404), but the API call should have been made + assert.True(t, postResult.APICallMade, "Expected API call to be made") + // Note: Post action status may be failed if mock API returns 404 for error-report endpoint + } + + // Verify the error report payload was built correctly with CEL expressions accessing executionError + requests := mockAPI.GetRequests() + var errorReportRequest *testutil.MockRequest + for i := range requests { + if requests[i].Method == http.MethodPost && strings.Contains(requests[i].Path, "/error-report") { + errorReportRequest = &requests[i] + break + } + } + + assert.NotNil(t, errorReportRequest, "Expected error report API call to be made") + if errorReportRequest != nil { + t.Logf("Error report body: %s", errorReportRequest.Body) + + // Parse the request body + var reportPayload map[string]interface{} + err := json.Unmarshal([]byte(errorReportRequest.Body), &reportPayload) + assert.NoError(t, err, "Should be able to parse error report payload") + + if err == nil { + // Verify CEL expressions successfully accessed adapter.executionError + assert.Equal(t, true, reportPayload["hasError"], "hasError should be true") + assert.Equal(t, "preconditions", reportPayload["errorPhase"], "errorPhase 
should be 'preconditions'") + assert.Equal(t, "clusterStatus", reportPayload["errorStep"], "errorStep should be 'clusterStatus'") + assert.NotEqual(t, "no_message", reportPayload["errorMessage"], "errorMessage should contain actual error") + + // Verify other adapter fields still accessible + assert.Equal(t, "failed", reportPayload["executionStatus"], "executionStatus should be 'failed'") + assert.NotEmpty(t, reportPayload["errorReason"], "errorReason should be populated") + assert.Equal(t, "cluster-cel-error-test", reportPayload["clusterId"], "clusterId should match") + + t.Logf("CEL expressions successfully accessed executionError:") + t.Logf(" hasError: %v", reportPayload["hasError"]) + t.Logf(" errorPhase: %v", reportPayload["errorPhase"]) + t.Logf(" errorStep: %v", reportPayload["errorStep"]) + t.Logf(" errorMessage: %v", reportPayload["errorMessage"]) + t.Logf(" executionStatus: %v", reportPayload["executionStatus"]) + t.Logf(" errorReason: %v", reportPayload["errorReason"]) + } + } + + t.Logf("ExecutionError CEL access test completed successfully") +} + +// TestExecutor_PayloadBuildFailure tests that payload build failures are logged as errors and block post actions +func TestExecutor_PayloadBuildFailure(t *testing.T) { + // Setup mock API server + mockAPI := testutil.NewMockAPIServer(t) + defer mockAPI.Close() + + // Set environment variables + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Create config with invalid CEL expression in payload build (will cause build failure) + config := &config_loader.AdapterConfig{ + APIVersion: "hyperfleet.redhat.com/v1alpha1", + Kind: "AdapterConfig", + Metadata: config_loader.Metadata{ + Name: "payload-build-fail-test", + Namespace: "test", + }, + Spec: config_loader.AdapterConfigSpec{ + Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: "10s", RetryAttempts: 1, + }, + Params: 
[]config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: "env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Default: "v1"}, + {Name: "clusterId", Source: "event.cluster_id", Required: true}, + }, + Preconditions: []config_loader.Precondition{ + { + Name: "simpleCheck", + Conditions: []config_loader.Condition{ + {Field: "clusterId", Operator: "equals", Value: "test-cluster"}, + }, + }, + }, + Resources: []config_loader.Resource{}, + Post: &config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "badPayload", + Build: map[string]interface{}{ + // Use template that references non-existent parameter + "field": map[string]interface{}{ + "value": "{{ .nonExistentParam }}", + }, + }, + }, + }, + PostActions: []config_loader.PostAction{ + { + Name: "shouldNotExecute", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/status", + Body: "{{ .badPayload }}", + Timeout: "5s", + }, + }, + }, + }, + }, + } + + apiClient, _ := hyperfleet_api.NewClient() + + // Use log capture to verify error logging + logCapture := &logCaptureLogger{t: t, messages: make([]string, 0)} + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithLogger(logCapture). + WithK8sClient(getK8sEnvForTest(t).Client). 
+ Build() + if err != nil { + t.Fatalf("Failed to create executor: %v", err) + } + + // Execute + evt := createTestEvent("test-cluster", "resource-payload-fail") + ctx := context.Background() + result := exec.Execute(ctx, evt) + + // Verify execution failed in post_actions phase (payload build) + assert.Equal(t, executor.StatusFailed, result.Status, "Expected failed status") + assert.Equal(t, executor.PhasePostActions, result.Phase, "Expected failure in post_actions phase") + assert.NotNil(t, result.Error, "Expected error to be set") + + // Verify preconditions passed + assert.Equal(t, 1, len(result.PreconditionResults), "Expected 1 precondition result") + if len(result.PreconditionResults) > 0 { + assert.True(t, result.PreconditionResults[0].Matched, "Expected precondition to pass") + } + + // Verify NO post actions were executed (blocked by payload build failure) + assert.Equal(t, 0, len(result.PostActionResults), "Expected 0 post action results (blocked by payload build failure)") + + // Verify ExecutionError was set + assert.NotNil(t, result.ExecutionContext, "Expected execution context") + if result.ExecutionContext != nil { + assert.NotNil(t, result.ExecutionContext.Adapter.ExecutionError, "Expected ExecutionError to be set") + if result.ExecutionContext.Adapter.ExecutionError != nil { + assert.Equal(t, "post_actions", result.ExecutionContext.Adapter.ExecutionError.Phase) + assert.Equal(t, "build_payloads", result.ExecutionContext.Adapter.ExecutionError.Step) + t.Logf("ExecutionError: %+v", result.ExecutionContext.Adapter.ExecutionError) + } + } + + // Verify error was logged (should contain "failed to build") + foundErrorLog := false + for _, msg := range logCapture.messages { + if strings.Contains(msg, "[ERROR]") && strings.Contains(msg, "failed to build") { + foundErrorLog = true + t.Logf("Found error log: %s", msg) + break + } + } + assert.True(t, foundErrorLog, "Expected to find error log for payload build failure") + + // Verify NO API call was made to 
the post action endpoint (blocked) + requests := mockAPI.GetRequests() + for _, req := range requests { + if req.Method == http.MethodPost && strings.Contains(req.Path, "/status") { + t.Errorf("Post action API call should NOT have been made (blocked by payload build failure)") + } + } + + t.Logf("Payload build failure test completed: post actions properly blocked, error logged") +} + diff --git a/test/integration/executor/executor_k8s_integration_test.go b/test/integration/executor/executor_k8s_integration_test.go new file mode 100644 index 0000000..c9c9c59 --- /dev/null +++ b/test/integration/executor/executor_k8s_integration_test.go @@ -0,0 +1,1028 @@ +package executor_integration_test + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/config_loader" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/executor" + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/hyperfleet_api" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// k8sTestAPIServer creates a mock API server for K8s integration tests +type k8sTestAPIServer struct { + server *httptest.Server + mu sync.Mutex + requests []k8sTestRequest + clusterResponse map[string]interface{} + statusResponses []map[string]interface{} +} + +type k8sTestRequest struct { + Method string + Path string + Body string +} + +func newK8sTestAPIServer(t *testing.T) *k8sTestAPIServer { + mock := &k8sTestAPIServer{ + requests: make([]k8sTestRequest, 0), + clusterResponse: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-cluster", + }, + "spec": map[string]interface{}{ + "region": "us-east-1", + "provider": "aws", + "vpc_id": "vpc-12345", + "node_count": 3, + }, + 
"status": map[string]interface{}{ + "phase": "Ready", + }, + }, + statusResponses: make([]map[string]interface{}, 0), + } + + mock.server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mock.mu.Lock() + defer mock.mu.Unlock() + + // Read the entire request body. A single Read may return only part of the + // payload (e.g. chunked transfer encoding), which would truncate bodyStr and + // corrupt the recorded requests and captured status responses, so loop until + // the reader reports an error (io.EOF included). + var bodyStr string + if r.Body != nil { + var sb strings.Builder + buf := make([]byte, 32*1024) + for { + n, readErr := r.Body.Read(buf) + sb.Write(buf[:n]) + if readErr != nil { + break + } + } + bodyStr = sb.String() + } + + mock.requests = append(mock.requests, k8sTestRequest{ + Method: r.Method, + Path: r.URL.Path, + Body: bodyStr, + }) + + t.Logf("Mock API: %s %s", r.Method, r.URL.Path) + + switch { + case strings.Contains(r.URL.Path, "/clusters/") && strings.HasSuffix(r.URL.Path, "/status"): + if r.Method == http.MethodPost { + var statusBody map[string]interface{} + if err := json.Unmarshal([]byte(bodyStr), &statusBody); err == nil { + mock.statusResponses = append(mock.statusResponses, statusBody) + } + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(map[string]string{"status": "accepted"}) + return + } + case strings.Contains(r.URL.Path, "/clusters/"): + if r.Method == http.MethodGet { + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(mock.clusterResponse) + return + } + } + + w.WriteHeader(http.StatusNotFound) + _ = json.NewEncoder(w).Encode(map[string]string{"error": "not found"}) + })) + + return mock +} + +func (m *k8sTestAPIServer) Close() { + m.server.Close() +} + +func (m *k8sTestAPIServer) URL() string { + return m.server.URL +} + +// GetStatusResponses returns a copy of the captured status POST bodies. +func (m *k8sTestAPIServer) GetStatusResponses() []map[string]interface{} { + m.mu.Lock() + defer m.mu.Unlock() + return append([]map[string]interface{}{}, m.statusResponses...)
+} + +// createK8sTestEvent creates a CloudEvent for K8s integration testing +func createK8sTestEvent(clusterId string) *event.Event { + evt := event.New() + evt.SetID("k8s-test-event-" + clusterId) + evt.SetType("com.redhat.hyperfleet.cluster.provision") + evt.SetSource("k8s-integration-test") + evt.SetTime(time.Now()) + + eventData := map[string]interface{}{ + "cluster_id": clusterId, + "resource_id": "resource-" + clusterId, + "resource_type": "cluster", + "generation": "gen-001", + "href": "/api/v1/clusters/" + clusterId, + } + eventDataBytes, _ := json.Marshal(eventData) + _ = evt.SetData(event.ApplicationJSON, eventDataBytes) + + return &evt +} + +// createK8sTestConfig creates an AdapterConfig with K8s resources +func createK8sTestConfig(apiBaseURL, testNamespace string) *config_loader.AdapterConfig { + return &config_loader.AdapterConfig{ + APIVersion: "hyperfleet.redhat.com/v1alpha1", + Kind: "AdapterConfig", + Metadata: config_loader.Metadata{ + Name: "k8s-test-adapter", + Namespace: testNamespace, + }, + Spec: config_loader.AdapterConfigSpec{ + Adapter: config_loader.AdapterInfo{ + Version: "1.0.0", + }, + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: "10s", + RetryAttempts: 1, + RetryBackoff: "constant", + }, + Params: []config_loader.Parameter{ + { + Name: "hyperfleetApiBaseUrl", + Source: "env.HYPERFLEET_API_BASE_URL", + Required: true, + }, + { + Name: "hyperfleetApiVersion", + Source: "env.HYPERFLEET_API_VERSION", + Default: "v1", + Required: false, + }, + { + Name: "clusterId", + Source: "event.cluster_id", + Required: true, + }, + { + Name: "testNamespace", + Default: testNamespace, + Required: false, + }, + }, + Preconditions: []config_loader.Precondition{ + { + Name: "clusterStatus", + APICall: &config_loader.APICall{ + Method: "GET", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}", + Timeout: "5s", + }, + Capture: []config_loader.CaptureField{ + {Name: "clusterName", Field: 
"metadata.name"}, + {Name: "clusterPhase", Field: "status.phase"}, + {Name: "region", Field: "spec.region"}, + {Name: "cloudProvider", Field: "spec.provider"}, + }, + Conditions: []config_loader.Condition{ + {Field: "clusterPhase", Operator: "in", Value: []interface{}{"Provisioning", "Installing", "Ready"}}, + }, + }, + }, + // K8s Resources to create + Resources: []config_loader.Resource{ + { + Name: "clusterConfigMap", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "cluster-config-{{ .clusterId }}", + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + "hyperfleet.io/managed-by": "{{ .metadata.name }}", + "test": "executor-integration", + }, + }, + "data": map[string]interface{}{ + "cluster-id": "{{ .clusterId }}", + "cluster-name": "{{ .clusterName }}", + "region": "{{ .region }}", + "provider": "{{ .cloudProvider }}", + "phase": "{{ .clusterPhase }}", + }, + }, + Discovery: &config_loader.DiscoveryConfig{ + Namespace: testNamespace, + ByName: "cluster-config-{{ .clusterId }}", + }, + }, + { + Name: "clusterSecret", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Secret", + "metadata": map[string]interface{}{ + "name": "cluster-secret-{{ .clusterId }}", + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + "hyperfleet.io/managed-by": "{{ .metadata.name }}", + "test": "executor-integration", + }, + }, + "type": "Opaque", + "stringData": map[string]interface{}{ + "cluster-id": "{{ .clusterId }}", + "api-token": "test-token-{{ .clusterId }}", + }, + }, + Discovery: &config_loader.DiscoveryConfig{ + Namespace: testNamespace, + ByName: "cluster-secret-{{ .clusterId }}", + }, + }, + }, + Post: &config_loader.PostConfig{ + Payloads: []config_loader.Payload{ + { + Name: "clusterStatusPayload", + Build: map[string]interface{}{ + "conditions": 
map[string]interface{}{ + "applied": map[string]interface{}{ + "status": map[string]interface{}{ + "expression": "adapter.executionStatus == \"success\"", + }, + "reason": map[string]interface{}{ + "expression": "has(adapter.errorReason) ? adapter.errorReason : \"ResourcesCreated\"", + }, + "message": map[string]interface{}{ + "expression": "has(adapter.errorMessage) ? adapter.errorMessage : \"ConfigMap and Secret created successfully\"", + }, + }, + }, + "clusterId": map[string]interface{}{ + "value": "{{ .clusterId }}", + }, + "resourcesCreated": map[string]interface{}{ + "value": "2", + }, + }, + }, + }, + PostActions: []config_loader.PostAction{ + { + Name: "reportClusterStatus", + APICall: &config_loader.APICall{ + Method: "POST", + URL: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/status", + Body: "{{ .clusterStatusPayload }}", + Timeout: "5s", + }, + }, + }, + }, + }, + } +} + +// TestExecutor_K8s_CreateResources tests the full flow with real K8s resource creation +func TestExecutor_K8s_CreateResources(t *testing.T) { + // Setup K8s test environment + k8sEnv := SetupK8sTestEnv(t) + defer k8sEnv.Cleanup(t) + + // Create test namespace + testNamespace := fmt.Sprintf("executor-test-%d", time.Now().Unix()) + k8sEnv.CreateTestNamespace(t, testNamespace) + defer k8sEnv.CleanupTestNamespace(t, testNamespace) + + // Setup mock API server + mockAPI := newK8sTestAPIServer(t) + defer mockAPI.Close() + + // Set environment variables + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Create config with K8s resources + config := createK8sTestConfig(mockAPI.URL(), testNamespace) + apiClient, _ := hyperfleet_api.NewClient( + hyperfleet_api.WithTimeout(10*time.Second), + hyperfleet_api.WithRetryAttempts(1), + ) + + // Create executor with real K8s client + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sEnv.Client). 
+ WithLogger(k8sEnv.Log). + Build() + require.NoError(t, err) + + // Create test event + clusterId := fmt.Sprintf("cluster-%d", time.Now().UnixNano()) + evt := createK8sTestEvent(clusterId) + + // Execute + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + + result := exec.Execute(ctx, evt) + + // Verify execution succeeded + if result.Status != executor.StatusSuccess { + t.Fatalf("Expected success status, got %s: %v (phase: %s)", result.Status, result.Error, result.Phase) + } + + t.Logf("Execution completed successfully") + + // Verify resource results + require.Len(t, result.ResourceResults, 2, "Expected 2 resource results") + + // Check ConfigMap was created + cmResult := result.ResourceResults[0] + assert.Equal(t, "clusterConfigMap", cmResult.Name) + assert.Equal(t, executor.StatusSuccess, cmResult.Status, "ConfigMap creation should succeed") + assert.Equal(t, executor.OperationCreate, cmResult.Operation, "Should be create operation") + assert.Equal(t, "ConfigMap", cmResult.Kind) + t.Logf("ConfigMap created: %s/%s (operation: %s)", cmResult.Namespace, cmResult.ResourceName, cmResult.Operation) + + // Check Secret was created + secretResult := result.ResourceResults[1] + assert.Equal(t, "clusterSecret", secretResult.Name) + assert.Equal(t, executor.StatusSuccess, secretResult.Status, "Secret creation should succeed") + assert.Equal(t, executor.OperationCreate, secretResult.Operation) + assert.Equal(t, "Secret", secretResult.Kind) + t.Logf("Secret created: %s/%s (operation: %s)", secretResult.Namespace, secretResult.ResourceName, secretResult.Operation) + + // Verify ConfigMap exists in K8s + cmGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} + cmName := fmt.Sprintf("cluster-config-%s", clusterId) + cm, err := k8sEnv.Client.GetResource(ctx, cmGVK, testNamespace, cmName) + require.NoError(t, err, "ConfigMap should exist in K8s") + assert.Equal(t, cmName, cm.GetName()) + + // Verify ConfigMap data 
+ cmData, found, err := unstructured.NestedStringMap(cm.Object, "data") + require.NoError(t, err) + require.True(t, found, "ConfigMap should have data") + assert.Equal(t, clusterId, cmData["cluster-id"]) + assert.Equal(t, "test-cluster", cmData["cluster-name"]) + assert.Equal(t, "us-east-1", cmData["region"]) + assert.Equal(t, "aws", cmData["provider"]) + assert.Equal(t, "Ready", cmData["phase"]) + t.Logf("ConfigMap data verified: %+v", cmData) + + // Verify ConfigMap labels + cmLabels := cm.GetLabels() + assert.Equal(t, clusterId, cmLabels["hyperfleet.io/cluster-id"]) + assert.Equal(t, "k8s-test-adapter", cmLabels["hyperfleet.io/managed-by"]) + + // Verify Secret exists in K8s + secretGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} + secretName := fmt.Sprintf("cluster-secret-%s", clusterId) + secret, err := k8sEnv.Client.GetResource(ctx, secretGVK, testNamespace, secretName) + require.NoError(t, err, "Secret should exist in K8s") + assert.Equal(t, secretName, secret.GetName()) + t.Logf("Secret verified: %s", secretName) + + // Verify post action reported status with correct template expression values + statusResponses := mockAPI.GetStatusResponses() + require.Len(t, statusResponses, 1, "Should have 1 status response") + status := statusResponses[0] + t.Logf("Status reported: %+v", status) + + if conditions, ok := status["conditions"].(map[string]interface{}); ok { + if applied, ok := conditions["applied"].(map[string]interface{}); ok { + // Status should be true (adapter.executionStatus == "success") + assert.Equal(t, true, applied["status"], "Applied status should be true") + + // Reason should be "ResourcesCreated" (default, no adapter.errorReason) + assert.Equal(t, "ResourcesCreated", applied["reason"], "Should use default reason for success") + + // Message should be success message (default, no adapter.errorMessage) + if message, ok := applied["message"].(string); ok { + assert.Equal(t, "ConfigMap and Secret created successfully", 
message, "Should use default success message") + } + } + } +} + +// TestExecutor_K8s_UpdateExistingResource tests updating an existing resource +func TestExecutor_K8s_UpdateExistingResource(t *testing.T) { + k8sEnv := SetupK8sTestEnv(t) + defer k8sEnv.Cleanup(t) + + testNamespace := fmt.Sprintf("executor-update-%d", time.Now().Unix()) + k8sEnv.CreateTestNamespace(t, testNamespace) + defer k8sEnv.CleanupTestNamespace(t, testNamespace) + + mockAPI := newK8sTestAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + clusterId := fmt.Sprintf("update-cluster-%d", time.Now().UnixNano()) + + // Pre-create the ConfigMap + existingCM := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": fmt.Sprintf("cluster-config-%s", clusterId), + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": clusterId, + "hyperfleet.io/managed-by": "k8s-test-adapter", + "test": "executor-integration", + }, + }, + "data": map[string]interface{}{ + "cluster-id": clusterId, + "phase": "Provisioning", // Old value + }, + }, + } + existingCM.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}) + + ctx := context.Background() + _, err := k8sEnv.Client.CreateResource(ctx, existingCM) + require.NoError(t, err, "Failed to pre-create ConfigMap") + t.Logf("Pre-created ConfigMap with phase=Provisioning") + + // Create executor + config := createK8sTestConfig(mockAPI.URL(), testNamespace) + // Only include ConfigMap resource for this test + config.Spec.Resources = config.Spec.Resources[:1] + + apiClient, _ := hyperfleet_api.NewClient() + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sEnv.Client). + WithLogger(k8sEnv.Log). 
+ Build() + require.NoError(t, err) + + // Execute - should update existing resource + evt := createK8sTestEvent(clusterId) + result := exec.Execute(ctx, evt) + + require.Equal(t, executor.StatusSuccess, result.Status, "Execution should succeed: %v", result.Error) + + // Verify it was an update operation + require.Len(t, result.ResourceResults, 1) + cmResult := result.ResourceResults[0] + assert.Equal(t, executor.OperationUpdate, cmResult.Operation, "Should be update operation") + t.Logf("Resource operation: %s", cmResult.Operation) + + // Verify ConfigMap was updated with new data + cmGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} + cmName := fmt.Sprintf("cluster-config-%s", clusterId) + updatedCM, err := k8sEnv.Client.GetResource(ctx, cmGVK, testNamespace, cmName) + require.NoError(t, err) + + cmData, _, _ := unstructured.NestedStringMap(updatedCM.Object, "data") + assert.Equal(t, "Ready", cmData["phase"], "Phase should be updated to Ready") + assert.Equal(t, "test-cluster", cmData["cluster-name"], "Should have new cluster-name field") + t.Logf("Updated ConfigMap data: %+v", cmData) + + // Verify status payload was built and sent with correct template expression values + statusResponses := mockAPI.GetStatusResponses() + require.Len(t, statusResponses, 1, "Should have reported status") + status := statusResponses[0] + t.Logf("Status reported after update: %+v", status) + + // Verify the status payload contains success values from template expressions + if conditions, ok := status["conditions"].(map[string]interface{}); ok { + if applied, ok := conditions["applied"].(map[string]interface{}); ok { + // Status should be true (adapter.executionStatus == "success") + assert.Equal(t, true, applied["status"], "Applied status should be true for successful update") + + // Reason should be default success reason (no adapter.errorReason) + assert.Equal(t, "ResourcesCreated", applied["reason"], "Should use default reason") + + // Message should be 
default success message (no adapter.errorMessage) + if message, ok := applied["message"].(string); ok { + assert.Contains(t, message, "created successfully", "Should contain success message") + } + } + } +} + +// TestExecutor_K8s_DiscoveryByLabels tests resource discovery using label selectors +func TestExecutor_K8s_DiscoveryByLabels(t *testing.T) { + k8sEnv := SetupK8sTestEnv(t) + defer k8sEnv.Cleanup(t) + + testNamespace := fmt.Sprintf("executor-discovery-%d", time.Now().Unix()) + k8sEnv.CreateTestNamespace(t, testNamespace) + defer k8sEnv.CleanupTestNamespace(t, testNamespace) + + mockAPI := newK8sTestAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + clusterId := fmt.Sprintf("discovery-cluster-%d", time.Now().UnixNano()) + + // Create config with label-based discovery + config := createK8sTestConfig(mockAPI.URL(), testNamespace) + // Modify to use label selector instead of byName + config.Spec.Resources = []config_loader.Resource{ + { + Name: "clusterConfigMap", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "cluster-config-{{ .clusterId }}", + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + "hyperfleet.io/managed-by": "{{ .metadata.name }}", + "app": "cluster-config", + }, + }, + "data": map[string]interface{}{ + "cluster-id": "{{ .clusterId }}", + }, + }, + Discovery: &config_loader.DiscoveryConfig{ + Namespace: testNamespace, + BySelectors: &config_loader.SelectorConfig{ + LabelSelector: map[string]string{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + "app": "cluster-config", + }, + }, + }, + }, + } + + apiClient, _ := hyperfleet_api.NewClient() + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sEnv.Client). + WithLogger(k8sEnv.Log). 
+ Build() + require.NoError(t, err) + + ctx := context.Background() + + // First execution - should create + evt := createK8sTestEvent(clusterId) + result1 := exec.Execute(ctx, evt) + require.Equal(t, executor.StatusSuccess, result1.Status) + assert.Equal(t, executor.OperationCreate, result1.ResourceResults[0].Operation) + t.Logf("First execution: %s", result1.ResourceResults[0].Operation) + + // Second execution - should find by labels and update + evt2 := createK8sTestEvent(clusterId) + result2 := exec.Execute(ctx, evt2) + require.Equal(t, executor.StatusSuccess, result2.Status) + assert.Equal(t, executor.OperationUpdate, result2.ResourceResults[0].Operation) + t.Logf("Second execution: %s (discovered by labels)", result2.ResourceResults[0].Operation) +} + +// TestExecutor_K8s_RecreateOnChange tests the recreateOnChange behavior +func TestExecutor_K8s_RecreateOnChange(t *testing.T) { + k8sEnv := SetupK8sTestEnv(t) + defer k8sEnv.Cleanup(t) + + testNamespace := fmt.Sprintf("executor-recreate-%d", time.Now().Unix()) + k8sEnv.CreateTestNamespace(t, testNamespace) + defer k8sEnv.CleanupTestNamespace(t, testNamespace) + + mockAPI := newK8sTestAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + clusterId := fmt.Sprintf("recreate-cluster-%d", time.Now().UnixNano()) + + // Create config with recreateOnChange + config := createK8sTestConfig(mockAPI.URL(), testNamespace) + config.Spec.Resources = []config_loader.Resource{ + { + Name: "clusterConfigMap", + RecreateOnChange: true, // Enable recreate + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "cluster-config-{{ .clusterId }}", + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + }, + }, + "data": map[string]interface{}{ + "cluster-id": "{{ .clusterId }}", + }, + }, + Discovery: 
&config_loader.DiscoveryConfig{ + Namespace: testNamespace, + ByName: "cluster-config-{{ .clusterId }}", + }, + }, + } + + apiClient, _ := hyperfleet_api.NewClient() + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sEnv.Client). + WithLogger(k8sEnv.Log). + Build() + require.NoError(t, err) + + ctx := context.Background() + + // First execution - create + evt := createK8sTestEvent(clusterId) + result1 := exec.Execute(ctx, evt) + require.Equal(t, executor.StatusSuccess, result1.Status) + assert.Equal(t, executor.OperationCreate, result1.ResourceResults[0].Operation) + + // Get the original UID + cmGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} + cmName := fmt.Sprintf("cluster-config-%s", clusterId) + originalCM, err := k8sEnv.Client.GetResource(ctx, cmGVK, testNamespace, cmName) + require.NoError(t, err) + originalUID := originalCM.GetUID() + t.Logf("Original ConfigMap UID: %s", originalUID) + + // Second execution - should recreate (delete + create) + evt2 := createK8sTestEvent(clusterId) + result2 := exec.Execute(ctx, evt2) + require.Equal(t, executor.StatusSuccess, result2.Status) + assert.Equal(t, executor.OperationRecreate, result2.ResourceResults[0].Operation) + t.Logf("Second execution: %s", result2.ResourceResults[0].Operation) + + // Verify it's a new resource (different UID) + recreatedCM, err := k8sEnv.Client.GetResource(ctx, cmGVK, testNamespace, cmName) + require.NoError(t, err) + newUID := recreatedCM.GetUID() + assert.NotEqual(t, originalUID, newUID, "Resource should have new UID after recreate") + t.Logf("Recreated ConfigMap UID: %s (different from %s)", newUID, originalUID) +} + +// TestExecutor_K8s_MultipleResourceTypes tests creating different resource types +func TestExecutor_K8s_MultipleResourceTypes(t *testing.T) { + k8sEnv := SetupK8sTestEnv(t) + defer k8sEnv.Cleanup(t) + + testNamespace := fmt.Sprintf("executor-multi-%d", time.Now().Unix()) + 
k8sEnv.CreateTestNamespace(t, testNamespace) + defer k8sEnv.CleanupTestNamespace(t, testNamespace) + + mockAPI := newK8sTestAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + // Execute with default config (ConfigMap + Secret) + config := createK8sTestConfig(mockAPI.URL(), testNamespace) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sEnv.Client). + WithLogger(k8sEnv.Log). + Build() + require.NoError(t, err) + + clusterId := fmt.Sprintf("multi-cluster-%d", time.Now().UnixNano()) + evt := createK8sTestEvent(clusterId) + + result := exec.Execute(context.Background(), evt) + + require.Equal(t, executor.StatusSuccess, result.Status) + require.Len(t, result.ResourceResults, 2) + + // Verify both resources created + for _, rr := range result.ResourceResults { + assert.Equal(t, executor.StatusSuccess, rr.Status, "Resource %s should succeed", rr.Name) + assert.Equal(t, executor.OperationCreate, rr.Operation) + t.Logf("Created %s: %s/%s", rr.Kind, rr.Namespace, rr.ResourceName) + } + + // Verify we can list resources by labels + cmGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} + selector := fmt.Sprintf("hyperfleet.io/cluster-id=%s", clusterId) + list, err := k8sEnv.Client.ListResources(context.Background(), cmGVK, testNamespace, selector) + require.NoError(t, err) + assert.Len(t, list.Items, 1, "Should find 1 ConfigMap with cluster label") +} + +// TestExecutor_K8s_ResourceCreationFailure tests handling of K8s API failures +func TestExecutor_K8s_ResourceCreationFailure(t *testing.T) { + k8sEnv := SetupK8sTestEnv(t) + defer k8sEnv.Cleanup(t) + + // Use a namespace that doesn't exist (should fail) + nonExistentNamespace := "non-existent-namespace-12345" + + mockAPI := newK8sTestAPIServer(t) + defer mockAPI.Close() + + 
t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createK8sTestConfig(mockAPI.URL(), nonExistentNamespace) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sEnv.Client). + WithLogger(k8sEnv.Log). + Build() + require.NoError(t, err) + + evt := createK8sTestEvent("failure-test") + result := exec.Execute(context.Background(), evt) + + // Should fail during resource creation + assert.Equal(t, executor.StatusFailed, result.Status) + // Phase will be post_actions because executor continues to post-actions after resource failure + // This is correct behavior - we want to report errors even when resources fail + assert.Equal(t, executor.PhasePostActions, result.Phase) + assert.NotNil(t, result.Error) + t.Logf("Expected failure: %v", result.Error) + + // Post actions should still execute to report error + assert.NotEmpty(t, result.PostActionResults, "Post actions should still execute") + + // Verify K8s error is captured in the status payload via adapter.xxx fields + statusResponses := mockAPI.GetStatusResponses() + if len(statusResponses) == 1 { + status := statusResponses[0] + t.Logf("K8s error status payload: %+v", status) + + if conditions, ok := status["conditions"].(map[string]interface{}); ok { + if applied, ok := conditions["applied"].(map[string]interface{}); ok { + // Status should be false (adapter.executionStatus != "success") + assert.Equal(t, false, applied["status"], "Applied status should be false for K8s error") + + // Reason should contain K8s error (from adapter.errorReason) + if reason, ok := applied["reason"].(string); ok { + if reason == "ResourcesCreated" { + t.Error("Expected K8s error reason, got default success reason") + } + t.Logf("K8s error reason: %s", reason) + } + + // Message should contain K8s error details (from adapter.errorMessage) + if message, ok := 
applied["message"].(string); ok {
+ if message == "ConfigMap and Secret created successfully" {
+ t.Error("Expected K8s error message, got default success message")
+ }
+ // Should contain namespace-related error
+ if !strings.Contains(strings.ToLower(message), "namespace") &&
+ !strings.Contains(strings.ToLower(message), "not found") {
+ t.Logf("Warning: K8s error message may not contain expected keywords: %s", message)
+ }
+ t.Logf("K8s error message: %s", message)
+ }
+ }
+ }
+ } else {
+ t.Logf("Note: Expected status response for K8s error, got %d responses", len(statusResponses))
+ }
+}
+
+// TestExecutor_K8s_MultipleMatchingResources tests resource creation with multiple labeled resources.
+// When several resources match the same label selector, the Kubernetes API does not guarantee
+// list order, so no assumption is made about which matching resource would be returned first.
+// Note: Discovery-based update logic is not yet implemented. This test currently only verifies
+// that creating a new resource works when other resources with similar labels exist.
+// TODO: Implement proper discovery-based update logic and update this test accordingly.
+func TestExecutor_K8s_MultipleMatchingResources(t *testing.T) { + k8sEnv := SetupK8sTestEnv(t) + defer k8sEnv.Cleanup(t) + + testNamespace := fmt.Sprintf("executor-multi-match-%d", time.Now().Unix()) + k8sEnv.CreateTestNamespace(t, testNamespace) + defer k8sEnv.CleanupTestNamespace(t, testNamespace) + + mockAPI := newK8sTestAPIServer(t) + defer mockAPI.Close() + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + clusterId := fmt.Sprintf("multi-match-%d", time.Now().UnixNano()) + ctx := context.Background() + + // Pre-create multiple ConfigMaps with the same labels but different names + for i := 1; i <= 3; i++ { + cm := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": fmt.Sprintf("config-%s-%d", clusterId, i), + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": clusterId, + "app": "multi-match-test", + }, + }, + "data": map[string]interface{}{ + "index": fmt.Sprintf("%d", i), + }, + }, + } + cm.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"}) + _, err := k8sEnv.Client.CreateResource(ctx, cm) + require.NoError(t, err, "Failed to pre-create ConfigMap %d", i) + } + t.Logf("Pre-created 3 ConfigMaps with same labels") + + // Create config WITHOUT discovery - just create a new resource + // Discovery-based update logic is not yet implemented + config := &config_loader.AdapterConfig{ + APIVersion: "hyperfleet.redhat.com/v1alpha1", + Kind: "AdapterConfig", + Metadata: config_loader.Metadata{ + Name: "multi-match-test", + Namespace: testNamespace, + }, + Spec: config_loader.AdapterConfigSpec{ + Adapter: config_loader.AdapterInfo{Version: "1.0.0"}, + HyperfleetAPI: config_loader.HyperfleetAPIConfig{ + Timeout: "10s", RetryAttempts: 1, + }, + Params: []config_loader.Parameter{ + {Name: "hyperfleetApiBaseUrl", Source: 
"env.HYPERFLEET_API_BASE_URL", Required: true}, + {Name: "hyperfleetApiVersion", Default: "v1"}, + {Name: "clusterId", Source: "event.cluster_id", Required: true}, + }, + // No preconditions - this test focuses on resource creation + Resources: []config_loader.Resource{ + { + Name: "clusterConfig", + Manifest: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "name": "config-{{ .clusterId }}-new", + "namespace": testNamespace, + "labels": map[string]interface{}{ + "hyperfleet.io/cluster-id": "{{ .clusterId }}", + "app": "multi-match-test", + }, + }, + "data": map[string]interface{}{ + "cluster-id": "{{ .clusterId }}", + "created": "true", + }, + }, + // No Discovery - just create the resource + }, + }, + }, + } + + apiClient, _ := hyperfleet_api.NewClient() + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sEnv.Client). + WithLogger(k8sEnv.Log). + Build() + require.NoError(t, err) + + evt := createK8sTestEvent(clusterId) + result := exec.Execute(ctx, evt) + + require.Equal(t, executor.StatusSuccess, result.Status, "Execution should succeed: %v", result.Error) + require.Len(t, result.ResourceResults, 1) + + // Should create a new resource (no discovery configured) + rr := result.ResourceResults[0] + assert.Equal(t, executor.OperationCreate, rr.Operation, + "Should create new resource (no discovery configured)") + t.Logf("Operation: %s on resource: %s/%s", rr.Operation, rr.Namespace, rr.ResourceName) + + // Verify we now have 4 ConfigMaps (3 pre-created + 1 new) + cmGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} + selector := fmt.Sprintf("hyperfleet.io/cluster-id=%s,app=multi-match-test", clusterId) + list, err := k8sEnv.Client.ListResources(ctx, cmGVK, testNamespace, selector) + require.NoError(t, err) + assert.Len(t, list.Items, 4, "Should have 4 ConfigMaps (3 pre-created + 1 new)") + + // Verify the new one has the 
"created" field + createdCount := 0 + for _, item := range list.Items { + data, _, _ := unstructured.NestedStringMap(item.Object, "data") + if data["created"] == "true" { + createdCount++ + t.Logf("Created ConfigMap: %s", item.GetName()) + } + } + assert.Equal(t, 1, createdCount, "Exactly one ConfigMap should be created") +} + +// TestExecutor_K8s_PostActionsAfterPreconditionNotMet tests that post actions execute even when preconditions don't match +func TestExecutor_K8s_PostActionsAfterPreconditionNotMet(t *testing.T) { + k8sEnv := SetupK8sTestEnv(t) + defer k8sEnv.Cleanup(t) + + testNamespace := fmt.Sprintf("executor-precond-fail-%d", time.Now().Unix()) + k8sEnv.CreateTestNamespace(t, testNamespace) + defer k8sEnv.CleanupTestNamespace(t, testNamespace) + + mockAPI := newK8sTestAPIServer(t) + defer mockAPI.Close() + + // Set cluster to Terminating phase (won't match condition) + mockAPI.clusterResponse = map[string]interface{}{ + "metadata": map[string]interface{}{"name": "test-cluster"}, + "spec": map[string]interface{}{"region": "us-east-1"}, + "status": map[string]interface{}{"phase": "Terminating"}, // Won't match + } + + t.Setenv("HYPERFLEET_API_BASE_URL", mockAPI.URL()) + t.Setenv("HYPERFLEET_API_VERSION", "v1") + + config := createK8sTestConfig(mockAPI.URL(), testNamespace) + apiClient, _ := hyperfleet_api.NewClient() + + exec, err := executor.NewBuilder(). + WithAdapterConfig(config). + WithAPIClient(apiClient). + WithK8sClient(k8sEnv.Client). + WithLogger(k8sEnv.Log). 
+ Build() + require.NoError(t, err) + + clusterId := fmt.Sprintf("precond-fail-%d", time.Now().UnixNano()) + evt := createK8sTestEvent(clusterId) + + result := exec.Execute(context.Background(), evt) + + // Should be success with resources skipped (precondition not met is valid outcome) + assert.Equal(t, executor.StatusSuccess, result.Status, "Should be success when precondition not met (valid outcome)") + assert.True(t, result.ResourcesSkipped, "Resources should be skipped") + assert.Contains(t, result.SkipReason, "precondition", "Skip reason should mention precondition") + + // Resources should NOT be created (skipped) + assert.Empty(t, result.ResourceResults, "Resources should be skipped when precondition not met") + + // Post actions SHOULD still execute + assert.NotEmpty(t, result.PostActionResults, "Post actions should execute even when precondition not met") + t.Logf("Post action executed: %s (status: %s)", + result.PostActionResults[0].Name, result.PostActionResults[0].Status) + + // Verify status was reported with error info + statusResponses := mockAPI.GetStatusResponses() + require.Len(t, statusResponses, 1, "Should have reported status") + status := statusResponses[0] + t.Logf("Status reported after precondition failure: %+v", status) + + // Check that error info is in the status payload via template expressions + if conditions, ok := status["conditions"].(map[string]interface{}); ok { + if applied, ok := conditions["applied"].(map[string]interface{}); ok { + // Status should be false (adapter.executionStatus != "success") + assert.Equal(t, false, applied["status"], "Applied status should be false") + + // Reason should come from adapter.errorReason (not default) + if reason, ok := applied["reason"].(string); ok { + if reason == "ResourcesCreated" { + t.Error("Expected reason to be from adapter.errorReason, got default success reason") + } + t.Logf("Applied reason: %s", reason) + } + + // Message should come from adapter.errorMessage (not default) + if 
message, ok := applied["message"].(string); ok { + if message == "ConfigMap and Secret created successfully" { + t.Error("Expected message to be from adapter.errorMessage, got default success message") + } + t.Logf("Applied message: %s", message) + } + } + } +} + + diff --git a/test/integration/executor/main_test.go b/test/integration/executor/main_test.go new file mode 100644 index 0000000..e98fc75 --- /dev/null +++ b/test/integration/executor/main_test.go @@ -0,0 +1,204 @@ +package executor_integration_test + +import ( + "context" + "flag" + "fmt" + "os" + "testing" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-adapter/test/integration/testutil" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" +) + +const ( + EnvtestAPIServerPort = "6443/tcp" + EnvtestReadyLog = "Envtest is running" + EnvtestBearerToken = "envtest-token" +) + +// sharedK8sEnv holds the shared test environment for executor integration tests +var sharedK8sEnv *K8sTestEnv + +// setupErr holds any error that occurred during setup +var setupErr error + +// TestMain runs before all tests to set up the shared envtest container +func TestMain(m *testing.M) { + flag.Parse() + + // Check if we should skip integration tests + if testing.Short() { + os.Exit(m.Run()) + } + + // Check if INTEGRATION_ENVTEST_IMAGE is set + imageName := os.Getenv("INTEGRATION_ENVTEST_IMAGE") + if imageName == "" { + println("⚠️ INTEGRATION_ENVTEST_IMAGE not set, K8s tests will be skipped") + os.Exit(m.Run()) + } + + // Quick check if testcontainers can work + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + 
provider, err := testcontainers.NewDockerProvider() + if err != nil { + setupErr = err + println("⚠️ Warning: Could not connect to container runtime:", err.Error()) + println(" K8s tests will be skipped") + os.Exit(m.Run()) + } + + info, err := provider.DaemonHost(ctx) + _ = provider.Close() + + if err != nil { + setupErr = err + println("⚠️ Warning: Could not get container runtime info:", err.Error()) + println(" K8s tests will be skipped") + os.Exit(m.Run()) + } + + println("✅ Container runtime available:", info) + println("🚀 Setting up shared envtest for executor tests...") + + // Set up the shared environment + env, err := setupsharedK8sEnvtestEnv() + if err != nil { + setupErr = err + println("❌ Failed to set up shared environment:", err.Error()) + println(" K8s tests will be skipped") + os.Exit(m.Run()) + } + + sharedK8sEnv = env + println("✅ Shared envtest container ready for executor tests!") + println() + + // Run tests + exitCode := m.Run() + + // Cleanup after all tests + if sharedK8sEnv != nil && sharedK8sEnv.cleanup != nil { + println() + println("🧹 Cleaning up executor test envtest container...") + sharedK8sEnv.cleanup() + } + + os.Exit(exitCode) +} + +// setupsharedK8sEnvtestEnv creates the shared envtest environment for executor tests +func setupsharedK8sEnvtestEnv() (*K8sTestEnv, error) { + ctx := context.Background() + log := logger.NewLogger(ctx) + + imageName := os.Getenv("INTEGRATION_ENVTEST_IMAGE") + + // Start envtest container + config := testutil.ContainerConfig{ + Name: "executor-envtest", + Image: imageName, + ExposedPorts: []string{EnvtestAPIServerPort}, + WaitStrategy: wait.ForAll( + wait.ForListeningPort(EnvtestAPIServerPort).WithPollInterval(500 * time.Millisecond), + wait.ForLog(EnvtestReadyLog).WithPollInterval(500 * time.Millisecond), + ).WithDeadline(120 * time.Second), + MaxRetries: 3, + StartupTimeout: 3 * time.Minute, + } + + sharedContainer, err := testutil.StartSharedContainer(config) + if err != nil { + return nil, 
fmt.Errorf("failed to start envtest container: %w", err) + } + + // Get the kube-apiserver endpoint + kubeAPIServer := fmt.Sprintf("https://%s", sharedContainer.GetEndpoint(EnvtestAPIServerPort)) + println(fmt.Sprintf(" Kube-apiserver available at: %s", kubeAPIServer)) + + // Create rest.Config for the client + restConfig := &rest.Config{ + Host: kubeAPIServer, + BearerToken: EnvtestBearerToken, + TLSClientConfig: rest.TLSClientConfig{ + Insecure: true, + }, + } + + // Wait for API server to be ready + println(" Waiting for API server to be ready...") + if err := waitForAPIServerReady(restConfig, 30*time.Second); err != nil { + sharedContainer.Cleanup() + return nil, fmt.Errorf("API server failed to become ready: %w", err) + } + + // Create K8s client + client, err := k8s_client.NewClientFromConfig(ctx, restConfig, log) + if err != nil { + sharedContainer.Cleanup() + return nil, fmt.Errorf("failed to create K8s client: %w", err) + } + + println(" ✅ K8s client created successfully") + + // Create default namespace for tests + println(" Creating default namespace...") + defaultNS := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{"name": "default"}, + }, + } + defaultNS.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}) + _, err = client.CreateResource(ctx, defaultNS) + if err != nil && !apierrors.IsAlreadyExists(err) { + return nil, fmt.Errorf("failed to create default namespace: %w", err) + } + + return &K8sTestEnv{ + Client: client, + Config: restConfig, + Ctx: ctx, + Log: log, + cleanup: func() { + sharedContainer.Cleanup() + }, + }, nil +} + +// waitForAPIServerReady waits for the API server to be ready to accept connections +func waitForAPIServerReady(config *rest.Config, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + ctx := context.Background() + log := logger.NewLogger(ctx) + + for 
time.Now().Before(deadline) { + // Try to create a client + client, err := k8s_client.NewClientFromConfig(ctx, config, log) + if err == nil { + // Try to list namespaces to verify API server is responsive + gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} + _, err = client.ListResources(ctx, gvk, "", "") + if err == nil { + return nil // API server is ready + } + } + + time.Sleep(500 * time.Millisecond) + } + + return fmt.Errorf("timeout waiting for API server to be ready") +} + diff --git a/test/integration/executor/setup_test.go b/test/integration/executor/setup_test.go new file mode 100644 index 0000000..172be73 --- /dev/null +++ b/test/integration/executor/setup_test.go @@ -0,0 +1,88 @@ +package executor_integration_test + +import ( + "context" + "testing" + + "github.com/openshift-hyperfleet/hyperfleet-adapter/internal/k8s_client" + "github.com/openshift-hyperfleet/hyperfleet-adapter/pkg/logger" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" +) + +// K8sTestEnv wraps the K8s test environment for executor tests +type K8sTestEnv struct { + Client *k8s_client.Client + Config *rest.Config + Ctx context.Context + Log logger.Logger + cleanup func() +} + +// SetupK8sTestEnv returns the shared K8s test environment +func SetupK8sTestEnv(t *testing.T) *K8sTestEnv { + t.Helper() + + // Check if shared environment is available + if setupErr != nil { + t.Skipf("K8s integration tests require INTEGRATION_ENVTEST_IMAGE: %v", setupErr) + } + + if sharedK8sEnv == nil { + t.Skip("K8s integration tests require INTEGRATION_ENVTEST_IMAGE") + } + + return sharedK8sEnv +} + +// Cleanup cleans up the test environment +func (e *K8sTestEnv) Cleanup(t *testing.T) { + if e.cleanup != nil { + e.cleanup() + } +} + +// CreateTestNamespace creates a namespace for test isolation +func (e *K8sTestEnv) CreateTestNamespace(t *testing.T, name 
string) { + t.Helper() + + ns := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": map[string]interface{}{ + "name": name, + "labels": map[string]interface{}{ + "test": "executor-integration", + "hyperfleet.io/test-namespace": "true", + }, + }, + }, + } + ns.SetGroupVersionKind(schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"}) + + _, err := e.Client.CreateResource(e.Ctx, ns) + if err != nil && !isAlreadyExistsError(err) { + t.Fatalf("Failed to create test namespace %s: %v", name, err) + } +} + +// CleanupTestNamespace deletes a test namespace +func (e *K8sTestEnv) CleanupTestNamespace(t *testing.T, name string) { + t.Helper() + + gvk := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} + err := e.Client.DeleteResource(e.Ctx, gvk, "", name) + if err != nil { + t.Logf("Warning: failed to cleanup namespace %s: %v", name, err) + } +} + +// isAlreadyExistsError checks if the error indicates the resource already exists +// Uses Kubernetes API error checking for type-safe error detection that properly unwraps error chains +func isAlreadyExistsError(err error) bool { + return apierrors.IsAlreadyExists(err) +} + diff --git a/test/integration/executor/testdata/test-adapter-config.yaml b/test/integration/executor/testdata/test-adapter-config.yaml new file mode 100644 index 0000000..e7b12fc --- /dev/null +++ b/test/integration/executor/testdata/test-adapter-config.yaml @@ -0,0 +1,89 @@ +apiVersion: hyperfleet.redhat.com/v1alpha1 +kind: AdapterConfig +metadata: + name: test-adapter + namespace: test-ns + +spec: + adapter: + version: "1.0.0" + + hyperfleetAPI: + timeout: 10s + retryAttempts: 1 + retryBackoff: constant + + params: + - name: hyperfleetApiBaseUrl + source: env.HYPERFLEET_API_BASE_URL + required: true + + - name: hyperfleetApiVersion + source: env.HYPERFLEET_API_VERSION + default: v1 + required: false + + - name: clusterId + source: event.cluster_id + 
required: true + + - name: resourceId + source: event.resource_id + required: true + + preconditions: + - name: clusterStatus + apiCall: + method: GET + url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}" + timeout: 5s + capture: + - name: clusterName + field: metadata.name + - name: clusterPhase + field: status.phase + - name: region + field: spec.region + - name: cloudProvider + field: spec.provider + - name: vpcId + field: spec.vpc_id + conditions: + - field: clusterPhase + operator: in + value: ["Provisioning", "Installing", "Ready"] + - field: cloudProvider + operator: in + value: ["aws", "gcp", "azure"] + + resources: [] # No K8s resources in this test - dry run mode + + post: + payloads: + - name: clusterStatusPayload + build: + conditions: + health: + status: + expression: | + adapter.executionStatus == "success" && !adapter.resourcesSkipped + reason: + expression: | + adapter.resourcesSkipped ? "PreconditionNotMet" : (adapter.errorReason != "" ? adapter.errorReason : "Healthy") + message: + expression: | + adapter.skipReason != "" ? adapter.skipReason : (adapter.errorMessage != "" ? adapter.errorMessage : "All adapter operations completed successfully") + clusterId: + value: "{{ .clusterId }}" + clusterName: + expression: | + clusterName != "" ? clusterName : "unknown" + + postActions: + - name: reportClusterStatus + apiCall: + method: POST + url: "{{ .hyperfleetApiBaseUrl }}/api/{{ .hyperfleetApiVersion }}/clusters/{{ .clusterId }}/status" + body: "{{ .clusterStatusPayload }}" + timeout: 5s + diff --git a/test/integration/testutil/mock_api_server.go b/test/integration/testutil/mock_api_server.go new file mode 100644 index 0000000..666a793 --- /dev/null +++ b/test/integration/testutil/mock_api_server.go @@ -0,0 +1,226 @@ +// Package testutil provides common utilities for integration tests. 
+package testutil + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" +) + +// MockRequest represents a recorded HTTP request +type MockRequest struct { + Method string + Path string + Body string +} + +// MockAPIServer creates a test HTTP server that simulates the HyperFleet API. +// It provides methods to configure responses and inspect recorded requests. +// +// TEMPORARY: This mock server is a placeholder for development and early testing. +// It will be replaced with a real hyperfleet-api container image (via testcontainers) +// for proper integration testing once the API image is available. +// +// TODO: Replace with testcontainers using hyperfleet-api image when available. +type MockAPIServer struct { + server *httptest.Server + mu sync.Mutex + requests []MockRequest + clusterResponse map[string]interface{} + statusResponses []map[string]interface{} + failPrecondition bool + failPostAction bool // If true, POST to /status endpoint returns 500 + t *testing.T +} + +// NewMockAPIServer creates a new MockAPIServer for testing. +// +// TEMPORARY: This will be replaced with a real hyperfleet-api testcontainer. +// See MockAPIServer documentation for details. 
+// +// The server simulates common HyperFleet API endpoints: +// - GET /clusters/{id} - Returns cluster details +// - POST /clusters/{id}/status - Accepts status updates +// - GET /validation/availability - Returns availability status +func NewMockAPIServer(t *testing.T) *MockAPIServer { + mock := &MockAPIServer{ + t: t, + requests: make([]MockRequest, 0), + clusterResponse: map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-cluster", + }, + "spec": map[string]interface{}{ + "region": "us-east-1", + "provider": "aws", + "vpc_id": "vpc-12345", + "node_count": 3, + }, + "status": map[string]interface{}{ + "phase": "Ready", + }, + }, + statusResponses: make([]map[string]interface{}, 0), + } + + mock.server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + mock.mu.Lock() + defer mock.mu.Unlock() + + // Read the full body. A single Read may return fewer bytes than the + // payload contains (io.Reader contract), which would truncate large or + // chunked POST bodies; loop until EOF instead. + var bodyBytes []byte + if r.Body != nil { + buf := make([]byte, 32*1024) + for { + n, readErr := r.Body.Read(buf) + bodyBytes = append(bodyBytes, buf[:n]...) + if readErr != nil { + break + } + } + } + bodyStr := string(bodyBytes) + + mock.requests = append(mock.requests, MockRequest{ + Method: r.Method, + Path: r.URL.Path, + Body: bodyStr, + }) + + t.Logf("Mock API received: %s %s", r.Method, r.URL.Path) + if bodyStr != "" { + t.Logf("Body: %s", bodyStr) + } + + // Route handling + switch { + case strings.Contains(r.URL.Path, "/clusters/") && strings.HasSuffix(r.URL.Path, "/status"): + // POST /clusters/{id}/status - Store status and return success (or fail if configured) + if r.Method == http.MethodPost { + // Check if we should fail the post action + if mock.failPostAction { + w.WriteHeader(http.StatusInternalServerError) + _ = json.NewEncoder(w).Encode(map[string]string{ + "error": "internal server error", + "message": "failed to update cluster status", + }) + return + } + + var statusBody map[string]interface{} + if err := json.Unmarshal([]byte(bodyStr), &statusBody); err == nil { + mock.statusResponses = append(mock.statusResponses, statusBody) + } + w.WriteHeader(http.StatusOK) + _ =
json.NewEncoder(w).Encode(map[string]string{"status": "accepted"}) + return + } + + case strings.Contains(r.URL.Path, "/clusters/"): + // GET /clusters/{id} - Return cluster details + if r.Method == http.MethodGet { + if mock.failPrecondition { + w.WriteHeader(http.StatusNotFound) + _ = json.NewEncoder(w).Encode(map[string]string{"error": "cluster not found"}) + return + } + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(mock.clusterResponse) + return + } + + case strings.Contains(r.URL.Path, "/validation/availability"): + // GET validation availability + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode("available") + return + } + + // Default 404 + w.WriteHeader(http.StatusNotFound) + _ = json.NewEncoder(w).Encode(map[string]string{"error": "not found"}) + })) + + return mock +} + +// Close stops the mock server +func (m *MockAPIServer) Close() { + m.server.Close() +} + +// URL returns the base URL of the mock server +func (m *MockAPIServer) URL() string { + return m.server.URL +} + +// GetRequests returns a copy of all recorded requests +func (m *MockAPIServer) GetRequests() []MockRequest { + m.mu.Lock() + defer m.mu.Unlock() + return append([]MockRequest{}, m.requests...) +} + +// GetStatusResponses returns a copy of all status responses received +func (m *MockAPIServer) GetStatusResponses() []map[string]interface{} { + m.mu.Lock() + defer m.mu.Unlock() + return append([]map[string]interface{}{}, m.statusResponses...) 
+} + +// SetClusterResponse sets the response for GET /clusters/{id} +func (m *MockAPIServer) SetClusterResponse(resp map[string]interface{}) { + m.mu.Lock() + defer m.mu.Unlock() + m.clusterResponse = resp +} + +// SetFailPrecondition configures whether precondition API calls should fail +func (m *MockAPIServer) SetFailPrecondition(fail bool) { + m.mu.Lock() + defer m.mu.Unlock() + m.failPrecondition = fail +} + +// SetFailPostAction configures whether post-action API calls should fail +func (m *MockAPIServer) SetFailPostAction(fail bool) { + m.mu.Lock() + defer m.mu.Unlock() + m.failPostAction = fail +} + +// ClearRequests clears all recorded requests +func (m *MockAPIServer) ClearRequests() { + m.mu.Lock() + defer m.mu.Unlock() + m.requests = make([]MockRequest, 0) +} + +// ClearStatusResponses clears all recorded status responses +func (m *MockAPIServer) ClearStatusResponses() { + m.mu.Lock() + defer m.mu.Unlock() + m.statusResponses = make([]map[string]interface{}, 0) +} + +// Reset resets the mock server to its initial state +// NOTE: the cluster response literal below duplicates the default built in +// NewMockAPIServer; keep the two in sync if the default payload changes. +func (m *MockAPIServer) Reset() { + m.mu.Lock() + defer m.mu.Unlock() + m.requests = make([]MockRequest, 0) + m.statusResponses = make([]map[string]interface{}, 0) + m.failPrecondition = false + m.failPostAction = false + m.clusterResponse = map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-cluster", + }, + "spec": map[string]interface{}{ + "region": "us-east-1", + "provider": "aws", + "vpc_id": "vpc-12345", + "node_count": 3, + }, + "status": map[string]interface{}{ + "phase": "Ready", + }, + } +} +