From 7ea0bfceb9411ff70264419144c46c6f715182c6 Mon Sep 17 00:00:00 2001 From: dawang Date: Fri, 30 Jan 2026 16:21:32 +0800 Subject: [PATCH 1/2] HYPERFLEET-455 - feat: Update .golangci.yaml based on the standard --- .golangci.yml | 105 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 104 insertions(+), 1 deletion(-) diff --git a/.golangci.yml b/.golangci.yml index 3d9e88f..dbe483a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,108 @@ -version: 2 +# HyperFleet Standard golangci-lint Configuration +# Reference: https://golangci-lint.run/usage/configuration +# +# This is the baseline configuration for all HyperFleet Go repositories. +# Copy this file to your repository root as .golangci.yml +# +# golangci-lint should be installed and versioned via bingo in each repository. +# See linting.md for full documentation of this standard. + +version: "2" + +run: + timeout: 5m + tests: true + modules-download-mode: readonly + +linters-settings: + errcheck: + check-type-assertions: true + check-blank: true + + govet: + enable-all: true + + goconst: + min-len: 3 + min-occurrences: 3 + + misspell: + locale: US + + lll: + line-length: 120 + + gofmt: + simplify: true + + revive: + rules: + - name: exported + severity: warning + disabled: true + - name: unexported-return + severity: warning + disabled: false + - name: var-naming + severity: warning + disabled: false + + unused: + check-exported: false + + unparam: + check-exported: false + + exhaustive: + check-generated: false + default-signifies-exhaustive: true + +linters: + enable: + # Code Quality + - errcheck + - govet + - staticcheck + - ineffassign + - unused + - unconvert + - unparam + - goconst + - exhaustive + + # Code Style + - misspell + - lll + - revive + - gocritic + + # Security + - gosec + issues: + # Add repository-specific generated code directories here exclude-dirs: - pkg/api/openapi # Generated code - data/generated/openapi # Generated file + + exclude-rules: + # Relaxed rules for test files + - path: _test\.go + linters: + - gosec + - errcheck + - unparam + + # Exclude generated code (backup pattern if exclude-dirs doesn't catch all) + - path: pkg/api/openapi/ + linters: + - all + + max-issues-per-linter: 0 + max-same-issues: 0 + new: false + +output: + format: colored-line-number + print-issued-lines: true + print-linter-name: true From 58c1a891465e4e9a5b61220ab1d44c071a902506 Mon Sep 17 00:00:00 2001 From: dawang Date: Fri, 30 Jan 2026 18:48:33 +0800 Subject: [PATCH 2/2] HYPERFLEET-455 - fix: comment revive in golangci.yml and fix goconst, gocritic, gosec, unparam and lll lint issues --- .golangci.yml | 2 +- cmd/hyperfleet-api/servecmd/cmd.go | 4 +- cmd/hyperfleet-api/server/api_server.go | 8 +- cmd/hyperfleet-api/server/health_server.go | 5 +- cmd/hyperfleet-api/server/metrics_server.go | 6 +- cmd/hyperfleet-api/server/routes.go | 14 ++- pkg/api/presenters/cluster_test.go | 10 +- pkg/api/presenters/slice_filter.go | 16 +-- pkg/auth/auth_middleware.go | 5 +- pkg/auth/authz_middleware.go | 12 +- pkg/auth/authz_middleware_mock.go | 3 +- pkg/auth/context.go | 12 +- pkg/client/ocm/authorization.go | 16 ++- pkg/client/ocm/authorization_mock.go | 10 +- pkg/client/ocm/client.go | 7 +- pkg/config/config.go | 2 +- pkg/config/config_test.go | 17 ++- pkg/config/db.go | 5 +- pkg/config/logging_test.go | 32 +++--- pkg/config/metrics.go | 5 +- pkg/config/ocm.go | 20 +++- pkg/config/server.go | 5 +- pkg/dao/adapter_status.go | 45 ++++++-- pkg/db/migrations.go | 5 +- .../migrations/202511111044_add_clusters.go | 4 +- 
.../migrations/202511111055_add_node_pools.go | 4 +- .../202511111105_add_adapter_status.go | 12 +- pkg/db/migrations/migration_structs.go | 10 +- pkg/db/sql_helpers.go | 25 ++-- pkg/db/transactions.go | 2 +- pkg/errors/errors.go | 107 +++++++++++++----- pkg/handlers/cluster_nodepools.go | 6 +- pkg/handlers/cluster_nodepools_test.go | 23 +++- pkg/handlers/cluster_status.go | 5 +- pkg/handlers/nodepool_status.go | 5 +- pkg/handlers/validation.go | 9 +- pkg/logger/gorm_logger.go | 7 +- pkg/logger/logger_test.go | 26 ++--- pkg/logger/text_handler.go | 4 +- pkg/logger/text_handler_test.go | 28 ++++- pkg/middleware/masking.go | 10 +- pkg/middleware/masking_test.go | 15 ++- pkg/middleware/schema_validation_test.go | 2 +- pkg/services/adapter_status.go | 36 ++++-- pkg/services/cluster.go | 26 ++++- pkg/services/cluster_test.go | 85 ++++++++------ pkg/services/generic.go | 41 +++++-- pkg/services/generic_test.go | 8 +- pkg/services/node_pool.go | 40 +++++-- pkg/services/node_pool_test.go | 60 ++++++---- pkg/services/status_aggregation.go | 29 +++-- pkg/telemetry/otel.go | 11 +- pkg/validators/schema_validator.go | 6 +- pkg/validators/schema_validator_test.go | 6 +- test/factories/clusters.go | 12 +- test/factories/node_pools.go | 12 +- test/integration/adapter_status_test.go | 86 ++++++++++---- test/integration/clusters_test.go | 83 ++++++++++---- test/integration/integration_test.go | 3 +- test/integration/metadata_test.go | 9 +- test/integration/node_pools_test.go | 28 +++-- test/mocks/jwk_cert_server.go | 7 +- test/mocks/ocm.go | 10 +- 63 files changed, 821 insertions(+), 347 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index dbe483a..08f38f3 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -73,7 +73,7 @@ linters: # Code Style - misspell - lll - - revive + # - revive - gocritic # Security diff --git a/cmd/hyperfleet-api/servecmd/cmd.go b/cmd/hyperfleet-api/servecmd/cmd.go index 1b2f072..0c3d0c5 100755 --- a/cmd/hyperfleet-api/servecmd/cmd.go +++ b/cmd/hyperfleet-api/servecmd/cmd.go @@ -113,7 +113,9 @@ func runServe(cmd *cobra.Command, args []string) { } if tp != nil { - shutdownCtx, cancel := context.WithTimeout(context.Background(), environments.Environment().Config.Health.ShutdownTimeout) + shutdownCtx, cancel := context.WithTimeout( + context.Background(), environments.Environment().Config.Health.ShutdownTimeout, + ) defer cancel() if err := telemetry.Shutdown(shutdownCtx, tp); err != nil { logger.WithError(ctx, err).Error("Failed to shutdown OpenTelemetry") diff --git a/cmd/hyperfleet-api/server/api_server.go b/cmd/hyperfleet-api/server/api_server.go index 2e23d0c..3121172 100755 --- a/cmd/hyperfleet-api/server/api_server.go +++ b/cmd/hyperfleet-api/server/api_server.go @@ -93,8 +93,9 @@ func NewAPIServer() Server { mainHandler = removeTrailingSlash(mainHandler) s.httpServer = &http.Server{ - Addr: env().Config.Server.BindAddress, - Handler: mainHandler, + Addr: env().Config.Server.BindAddress, + Handler: mainHandler, + ReadHeaderTimeout: 10 * time.Second, } return s @@ -136,7 +137,8 @@ func (s apiServer) Listen() (listener net.Listener, err error) { return net.Listen("tcp", env().Config.Server.BindAddress) } -// Start listening on the configured port and start the server. This is a convenience wrapper for Listen() and Serve(listener Listener) +// Start listening on the configured port and start the server. 
+// This is a convenience wrapper for Listen() and Serve(listener Listener) func (s apiServer) Start() { ctx := context.Background() listener, err := s.Listen() diff --git a/cmd/hyperfleet-api/server/health_server.go b/cmd/hyperfleet-api/server/health_server.go index 9e78151..8ddf574 100644 --- a/cmd/hyperfleet-api/server/health_server.go +++ b/cmd/hyperfleet-api/server/health_server.go @@ -30,8 +30,9 @@ func NewHealthServer() Server { listening: make(chan struct{}), } s.httpServer = &http.Server{ - Addr: env().Config.Health.BindAddress, - Handler: mainHandler, + Addr: env().Config.Health.BindAddress, + Handler: mainHandler, + ReadHeaderTimeout: 10 * time.Second, } return s } diff --git a/cmd/hyperfleet-api/server/metrics_server.go b/cmd/hyperfleet-api/server/metrics_server.go index b985ef7..259e836 100755 --- a/cmd/hyperfleet-api/server/metrics_server.go +++ b/cmd/hyperfleet-api/server/metrics_server.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "net/http" + "time" "github.com/gorilla/mux" @@ -25,8 +26,9 @@ func NewMetricsServer() Server { s := &metricsServer{} s.httpServer = &http.Server{ - Addr: env().Config.Metrics.BindAddress, - Handler: mainHandler, + Addr: env().Config.Metrics.BindAddress, + Handler: mainHandler, + ReadHeaderTimeout: 10 * time.Second, } return s } diff --git a/cmd/hyperfleet-api/server/routes.go b/cmd/hyperfleet-api/server/routes.go index 36fe34f..c693476 100755 --- a/cmd/hyperfleet-api/server/routes.go +++ b/cmd/hyperfleet-api/server/routes.go @@ -23,7 +23,12 @@ type ServicesInterface interface { GetService(name string) interface{} } -type RouteRegistrationFunc func(apiV1Router *mux.Router, services ServicesInterface, authMiddleware auth.JWTMiddleware, authzMiddleware auth.AuthorizationMiddleware) +type RouteRegistrationFunc func( + apiV1Router *mux.Router, + services ServicesInterface, + authMiddleware auth.JWTMiddleware, + authzMiddleware auth.AuthorizationMiddleware, +) var routeRegistry = make(map[string]RouteRegistrationFunc) @@ -31,7 +36,12 @@ func RegisterRoutes(name string, registrationFunc RouteRegistrationFunc) { routeRegistry[name] = registrationFunc } -func LoadDiscoveredRoutes(apiV1Router *mux.Router, services ServicesInterface, authMiddleware auth.JWTMiddleware, authzMiddleware auth.AuthorizationMiddleware) { +func LoadDiscoveredRoutes( + apiV1Router *mux.Router, + services ServicesInterface, + authMiddleware auth.JWTMiddleware, + authzMiddleware auth.AuthorizationMiddleware, +) { for name, registrationFunc := range routeRegistry { registrationFunc(apiV1Router, services, authMiddleware, authzMiddleware) _ = name // prevent unused variable warning diff --git a/pkg/api/presenters/cluster_test.go b/pkg/api/presenters/cluster_test.go index d0a89d3..3a61f3f 100644 --- a/pkg/api/presenters/cluster_test.go +++ b/pkg/api/presenters/cluster_test.go @@ -12,6 +12,10 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-api/pkg/util" ) +const ( + testConditionReady = "Ready" +) + // Helper function to create test ClusterCreateRequest func createTestClusterRequest() *openapi.ClusterCreateRequest { labels := map[string]string{"env": "test"} @@ -157,7 +161,7 @@ func TestPresentCluster_Complete(t *testing.T) { RegisterTestingT(t) now := time.Now() - reason := "Ready" + reason := testConditionReady message := "Cluster is ready" // Create domain ResourceCondition @@ -218,7 +222,7 @@ func TestPresentCluster_Complete(t *testing.T) { Expect(len(result.Status.Conditions)).To(Equal(1)) Expect(result.Status.Conditions[0].Type).To(Equal("Available")) 
Expect(result.Status.Conditions[0].Status).To(Equal(openapi.ResourceConditionStatusTrue)) - Expect(*result.Status.Conditions[0].Reason).To(Equal("Ready")) + Expect(*result.Status.Conditions[0].Reason).To(Equal(testConditionReady)) // Verify timestamps Expect(result.CreatedTime.Unix()).To(Equal(now.Unix())) @@ -300,7 +304,7 @@ func TestPresentCluster_StatusConditionsConversion(t *testing.T) { // First condition Expect(result.Status.Conditions[0].Type).To(Equal("Available")) Expect(result.Status.Conditions[0].Status).To(Equal(openapi.ResourceConditionStatusTrue)) - Expect(*result.Status.Conditions[0].Reason).To(Equal("Ready")) + Expect(*result.Status.Conditions[0].Reason).To(Equal(testConditionReady)) Expect(*result.Status.Conditions[0].Message).To(Equal("All systems operational")) // Second condition diff --git a/pkg/api/presenters/slice_filter.go b/pkg/api/presenters/slice_filter.go index b2e9a6a..9a819e4 100755 --- a/pkg/api/presenters/slice_filter.go +++ b/pkg/api/presenters/slice_filter.go @@ -103,7 +103,8 @@ func validate(model interface{}, in map[string]bool, prefix string) *errors.Serv } field := reflectValue.Field(i).Interface() name := strings.Split(tag, ",")[0] - if kind == reflect.Struct { + switch kind { //nolint:exhaustive // Only Struct and Slice need special handling, rest handled by default + case reflect.Struct: if t.Type == reflect.TypeOf(&time.Time{}) { delete(in, name) } else { @@ -116,12 +117,12 @@ func validate(model interface{}, in map[string]bool, prefix string) *errors.Serv } } } - } else if t.Type.Kind() == reflect.Slice { + case reflect.Slice: // TODO: We don't support Slices' validation :( in = removeStar(in, name) continue - //_ = validate(slice, in, name) - } else { + // _ = validate(slice, in, name) + default: prefixedName := name if prefix != "" { prefixedName = fmt.Sprintf("%s.%s", prefix, name) @@ -188,7 +189,8 @@ func structToMap(item interface{}, in map[string]bool, prefix string) map[string } field := reflectValue.Field(i).Interface() name := strings.Split(tag, ",")[0] - if kind == reflect.Struct { + switch kind { //nolint:exhaustive // Only Struct and Slice need special handling, rest handled by default + case reflect.Struct: if t.Type == reflect.TypeOf(&time.Time{}) { if _, ok := in[name]; ok { if timePtr, ok := field.(*time.Time); ok && timePtr != nil { @@ -205,7 +207,7 @@ func structToMap(item interface{}, in map[string]bool, prefix string) map[string res[name] = subStruct } } - } else if kind == reflect.Slice { + case reflect.Slice: s := reflect.ValueOf(field) if s.Len() > 0 { result := make([]interface{}, 0, s.Len()) @@ -220,7 +222,7 @@ func structToMap(item interface{}, in map[string]bool, prefix string) map[string res[name] = result } } - } else { + default: prefixedName := name if prefix != "" { prefixedName = fmt.Sprintf("%s.%s", prefix, name) diff --git a/pkg/auth/auth_middleware.go b/pkg/auth/auth_middleware.go index c38804e..4730230 100755 --- a/pkg/auth/auth_middleware.go +++ b/pkg/auth/auth_middleware.go @@ -26,7 +26,10 @@ func (a *Middleware) AuthenticateAccountJWT(next http.Handler) http.Handler { ctx := r.Context() payload, err := GetAuthPayload(r) if err != nil { - handleError(ctx, w, r, errors.CodeAuthNoCredentials, fmt.Sprintf("Unable to get payload details from JWT token: %s", err)) + handleError( + ctx, w, r, errors.CodeAuthNoCredentials, + fmt.Sprintf("Unable to get payload details from JWT token: %s", err), + ) return } diff --git a/pkg/auth/authz_middleware.go b/pkg/auth/authz_middleware.go index 508855a..e4a8412 100755 --- 
a/pkg/auth/authz_middleware.go +++ b/pkg/auth/authz_middleware.go @@ -46,8 +46,8 @@ func (a authzMiddleware) AuthorizeApi(next http.Handler) http.Handler { if username == "" { _ = fmt.Errorf("authenticated username not present in request context") // TODO - //body := api.E500.Format(r, "Authentication details not present in context") - //api.SendError(w, r, &body) + // body := api.E500.Format(r, "Authentication details not present in context") + // api.SendError(w, r, &body) return } @@ -56,8 +56,8 @@ func (a authzMiddleware) AuthorizeApi(next http.Handler) http.Handler { if err != nil { _ = fmt.Errorf("unable to make authorization request: %s", err) // TODO - //body := api.E500.Format(r, "Unable to make authorization request") - //api.SendError(w, r, &body) + // body := api.E500.Format(r, "Unable to make authorization request") + // api.SendError(w, r, &body) return } @@ -66,7 +66,7 @@ func (a authzMiddleware) AuthorizeApi(next http.Handler) http.Handler { } // TODO - //body := api.E403.Format(r, "") - //api.SendError(w, r, &body) + // body := api.E403.Format(r, "") + // api.SendError(w, r, &body) }) } diff --git a/pkg/auth/authz_middleware_mock.go b/pkg/auth/authz_middleware_mock.go index 4f8f7be..914b9c8 100755 --- a/pkg/auth/authz_middleware_mock.go +++ b/pkg/auth/authz_middleware_mock.go @@ -16,7 +16,8 @@ func NewAuthzMiddlewareMock() AuthorizationMiddleware { func (a authzMiddlewareMock) AuthorizeApi(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - logger.With(r.Context(), logger.HTTPMethod(r.Method), logger.HTTPPath(r.URL.Path)).Info("Mock authz allows / for method/path") + logger.With(r.Context(), logger.HTTPMethod(r.Method), logger.HTTPPath(r.URL.Path)). + Info("Mock authz allows / for method/path") next.ServeHTTP(w, r) }) } diff --git a/pkg/auth/context.go b/pkg/auth/context.go index f42b566..70fd9ea 100755 --- a/pkg/auth/context.go +++ b/pkg/auth/context.go @@ -70,12 +70,12 @@ func GetAuthPayloadFromContext(ctx context.Context) (*Payload, error) { // TODO figure out how to unmarshal jwt.mapclaims into the struct to avoid all the // type assertions // - //var accountAuth api.AuthPayload - //err := json.Unmarshal([]byte(claims), &accountAuth) - //if err != nil { - // err := fmt.Errorf("Unable to parse JWT token claims") - // return nil, err - //} + // var accountAuth api.AuthPayload + // err := json.Unmarshal([]byte(claims), &accountAuth) + // if err != nil { + // err := fmt.Errorf("Unable to parse JWT token claims") + // return nil, err + // } payload := &Payload{} // default to the values we expect from RHSSO diff --git a/pkg/client/ocm/authorization.go b/pkg/client/ocm/authorization.go index 29d3bf3..7c55ae8 100755 --- a/pkg/client/ocm/authorization.go +++ b/pkg/client/ocm/authorization.go @@ -8,15 +8,21 @@ import ( ) type Authorization interface { - SelfAccessReview(ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) - AccessReview(ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) + SelfAccessReview( + ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string, + ) (allowed bool, err error) + AccessReview( + ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string, + ) (allowed bool, err error) } type authorization service var _ Authorization = &authorization{} -func (a authorization) 
SelfAccessReview(ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { +func (a authorization) SelfAccessReview( + ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string, +) (allowed bool, err error) { con := a.client.connection selfAccessReview := con.Authorizations().V1().SelfAccessReview() @@ -45,7 +51,9 @@ func (a authorization) SelfAccessReview(ctx context.Context, action, resourceTyp return response.Allowed(), nil } -func (a authorization) AccessReview(ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { +func (a authorization) AccessReview( + ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string, +) (allowed bool, err error) { con := a.client.connection accessReview := con.Authorizations().V1().AccessReview() diff --git a/pkg/client/ocm/authorization_mock.go b/pkg/client/ocm/authorization_mock.go index e5ee369..ad63467 100755 --- a/pkg/client/ocm/authorization_mock.go +++ b/pkg/client/ocm/authorization_mock.go @@ -9,10 +9,16 @@ type authorizationMock service var _ Authorization = &authorizationMock{} -func (a authorizationMock) SelfAccessReview(ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { +func (a authorizationMock) SelfAccessReview( + ctx context.Context, + action, resourceType, organizationID, subscriptionID, clusterID string, +) (allowed bool, err error) { return true, nil } -func (a authorizationMock) AccessReview(ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { +func (a authorizationMock) AccessReview( + ctx context.Context, + username, action, resourceType, organizationID, subscriptionID, clusterID string, +) (allowed bool, err error) { return true, nil } diff --git a/pkg/client/ocm/client.go b/pkg/client/ocm/client.go index 4a57cbf..50765e4 100755 --- a/pkg/client/ocm/client.go +++ b/pkg/client/ocm/client.go @@ -58,11 +58,12 @@ func (c *Client) newConnection() error { URL(c.config.BaseURL). 
MetricsSubsystem("api_outbound") - if c.config.ClientID != "" && c.config.ClientSecret != "" { + switch { + case c.config.ClientID != "" && c.config.ClientSecret != "": builder = builder.Client(c.config.ClientID, c.config.ClientSecret) - } else if c.config.SelfToken != "" { + case c.config.SelfToken != "": builder = builder.Tokens(c.config.SelfToken) - } else { + default: return fmt.Errorf("can't build OCM client connection: no Client/Secret or Token has been provided") } diff --git a/pkg/config/config.go b/pkg/config/config.go index c0092ae..7395706 100755 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -117,7 +117,7 @@ func ReadFile(file string) (string, error) { } // Read the file - buf, err := os.ReadFile(absFilePath) + buf, err := os.ReadFile(absFilePath) //nolint:gosec // File path is controlled by config file settings if err != nil { return "", err } diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go index 0cc470f..87f69f1 100755 --- a/pkg/config/config_test.go +++ b/pkg/config/config_test.go @@ -1,7 +1,6 @@ package config import ( - "log" "os" "testing" @@ -12,10 +11,10 @@ func TestConfigReadStringFile(t *testing.T) { RegisterTestingT(t) stringFile, err := createConfigFile("string", "example\n") - defer os.Remove(stringFile.Name()) //nolint:errcheck if err != nil { - log.Fatal(err) + t.Fatalf("failed to create config file: %v", err) } + defer os.Remove(stringFile.Name()) //nolint:errcheck var stringConfig string err = readFileValueString(stringFile.Name(), &stringConfig) @@ -27,10 +26,10 @@ func TestConfigReadIntFile(t *testing.T) { RegisterTestingT(t) intFile, err := createConfigFile("int", "123") - defer os.Remove(intFile.Name()) //nolint:errcheck if err != nil { - log.Fatal(err) + t.Fatalf("failed to create config file: %v", err) } + defer os.Remove(intFile.Name()) //nolint:errcheck var intConfig int err = readFileValueInt(intFile.Name(), &intConfig) @@ -42,10 +41,10 @@ func TestConfigReadBoolFile(t *testing.T) { RegisterTestingT(t) boolFile, err := createConfigFile("bool", "true") - defer os.Remove(boolFile.Name()) //nolint:errcheck if err != nil { - log.Fatal(err) + t.Fatalf("failed to create config file: %v", err) } + defer os.Remove(boolFile.Name()) //nolint:errcheck var boolConfig = false err = readFileValueBool(boolFile.Name(), &boolConfig) @@ -57,10 +56,10 @@ func TestConfigReadQuotedFile(t *testing.T) { RegisterTestingT(t) stringFile, err := createConfigFile("string", "example") - defer os.Remove(stringFile.Name()) //nolint:errcheck if err != nil { - log.Fatal(err) + t.Fatalf("failed to create config file: %v", err) } + defer os.Remove(stringFile.Name()) //nolint:errcheck quotedFileName := "\"" + stringFile.Name() + "\"" val, err := ReadFile(quotedFileName) diff --git a/pkg/config/db.go b/pkg/config/db.go index 28926ce..d6c9de9 100755 --- a/pkg/config/db.go +++ b/pkg/config/db.go @@ -55,7 +55,10 @@ func (c *DatabaseConfig) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&c.RootCertFile, "db-rootcert", c.RootCertFile, "Database root certificate file") fs.StringVar(&c.SSLMode, "db-sslmode", c.SSLMode, "Database ssl mode (disable | require | verify-ca | verify-full)") fs.BoolVar(&c.Debug, "enable-db-debug", c.Debug, " framework's debug mode") - fs.IntVar(&c.MaxOpenConnections, "db-max-open-connections", c.MaxOpenConnections, "Maximum open DB connections for this instance") + fs.IntVar( + &c.MaxOpenConnections, "db-max-open-connections", c.MaxOpenConnections, + "Maximum open DB connections for this instance", + ) } // BindEnv reads configuration from 
environment variables diff --git a/pkg/config/logging_test.go b/pkg/config/logging_test.go index 5e93efe..4ecf534 100644 --- a/pkg/config/logging_test.go +++ b/pkg/config/logging_test.go @@ -7,6 +7,10 @@ import ( "github.com/spf13/pflag" ) +const ( + testLogLevelDebug = "debug" +) + // TestNewLoggingConfig_Defaults tests default configuration values func TestNewLoggingConfig_Defaults(t *testing.T) { cfg := NewLoggingConfig() @@ -66,9 +70,9 @@ func TestLoggingConfig_AddFlags(t *testing.T) { }{ { name: "custom values", - args: []string{"--log-level=debug", "--log-format=text", "--log-output=stderr"}, + args: []string{"--log-level=" + testLogLevelDebug, "--log-format=text", "--log-output=stderr"}, expected: map[string]string{ - "level": "debug", + "level": testLogLevelDebug, "format": "text", "output": "stderr", }, @@ -116,13 +120,13 @@ func TestLoggingConfig_BindEnv(t *testing.T) { { name: "basic logging env vars", envVars: map[string]string{ - "LOG_LEVEL": "debug", + "LOG_LEVEL": testLogLevelDebug, "LOG_FORMAT": "text", "LOG_OUTPUT": "stderr", }, validate: func(t *testing.T, cfg *LoggingConfig) { - if cfg.Level != "debug" { - t.Errorf("expected Level 'debug', got '%s'", cfg.Level) + if cfg.Level != testLogLevelDebug { + t.Errorf("expected Level %q, got '%s'", testLogLevelDebug, cfg.Level) } if cfg.Format != "text" { t.Errorf("expected Format 'text', got '%s'", cfg.Format) @@ -330,20 +334,20 @@ func TestLoggingConfig_FlagsOverrideEnv(t *testing.T) { cfg.AddFlags(fs) // Parse flags with different value - args := []string{"--log-level=debug"} + args := []string{"--log-level=" + testLogLevelDebug} if err := fs.Parse(args); err != nil { t.Fatalf("failed to parse flags: %v", err) } // Before BindEnv, should have flag value - if cfg.Level != "debug" { - t.Errorf("expected Level 'debug' from flag, got '%s'", cfg.Level) + if cfg.Level != testLogLevelDebug { + t.Errorf("expected Level %q from flag, got '%s'", testLogLevelDebug, cfg.Level) } // After BindEnv, flag should take priority over env var cfg.BindEnv(fs) - if cfg.Level != "debug" { - t.Errorf("expected Level 'debug' (flag > env), got '%s'", cfg.Level) + if cfg.Level != testLogLevelDebug { + t.Errorf("expected Level %q (flag > env), got '%s'", testLogLevelDebug, cfg.Level) } } @@ -413,15 +417,15 @@ func TestLoggingConfig_PriorityMixed(t *testing.T) { cfg.AddFlags(fs) // Only set flag for log-level - if err := fs.Parse([]string{"--log-level=debug"}); err != nil { + if err := fs.Parse([]string{"--log-level=" + testLogLevelDebug}); err != nil { t.Fatalf("failed to parse flags: %v", err) } cfg.BindEnv(fs) // log-level: flag wins over env - if cfg.Level != "debug" { - t.Errorf("expected Level 'debug' (flag > env), got '%s'", cfg.Level) + if cfg.Level != testLogLevelDebug { + t.Errorf("expected Level %q (flag > env), got '%s'", testLogLevelDebug, cfg.Level) } // log-format: env wins over default if cfg.Format != "text" { @@ -459,7 +463,7 @@ func TestLoggingConfig_OTelMaskingAlwaysApply(t *testing.T) { cfg.AddFlags(fs) // Parse with some other flag - if err := fs.Parse([]string{"--log-level=debug"}); err != nil { + if err := fs.Parse([]string{"--log-level=" + testLogLevelDebug}); err != nil { t.Fatalf("failed to parse flags: %v", err) } diff --git a/pkg/config/metrics.go b/pkg/config/metrics.go index 0008f75..a564a19 100755 --- a/pkg/config/metrics.go +++ b/pkg/config/metrics.go @@ -23,7 +23,10 @@ func NewMetricsConfig() *MetricsConfig { func (s *MetricsConfig) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.BindAddress, "metrics-server-bindaddress", 
s.BindAddress, "Metrics server bind adddress") fs.BoolVar(&s.EnableHTTPS, "enable-metrics-https", s.EnableHTTPS, "Enable HTTPS for metrics server") - fs.DurationVar(&s.LabelMetricsInclusionDuration, "label-metrics-inclusion-duration", 7*24*time.Hour, "A cluster's last telemetry date needs be within in this duration in order to have labels collected") + fs.DurationVar( + &s.LabelMetricsInclusionDuration, "label-metrics-inclusion-duration", 7*24*time.Hour, + "A cluster's last telemetry date needs be within in this duration in order to have labels collected", + ) } func (s *MetricsConfig) ReadFiles() error { diff --git a/pkg/config/ocm.go b/pkg/config/ocm.go index 1f7c6ab..092c609 100755 --- a/pkg/config/ocm.go +++ b/pkg/config/ocm.go @@ -30,11 +30,23 @@ func NewOCMConfig() *OCMConfig { } func (c *OCMConfig) AddFlags(fs *pflag.FlagSet) { - fs.StringVar(&c.ClientIDFile, "ocm-client-id-file", c.ClientIDFile, "File containing OCM API privileged account client-id") - fs.StringVar(&c.ClientSecretFile, "ocm-client-secret-file", c.ClientSecretFile, "File containing OCM API privileged account client-secret") - fs.StringVar(&c.SelfTokenFile, "self-token-file", c.SelfTokenFile, "File containing OCM API privileged offline SSO token") + fs.StringVar( + &c.ClientIDFile, "ocm-client-id-file", c.ClientIDFile, + "File containing OCM API privileged account client-id", + ) + fs.StringVar( + &c.ClientSecretFile, "ocm-client-secret-file", c.ClientSecretFile, + "File containing OCM API privileged account client-secret", + ) + fs.StringVar( + &c.SelfTokenFile, "self-token-file", c.SelfTokenFile, + "File containing OCM API privileged offline SSO token", + ) fs.StringVar(&c.BaseURL, "ocm-base-url", c.BaseURL, "The base URL of the OCM API, integration by default") - fs.StringVar(&c.TokenURL, "ocm-token-url", c.TokenURL, "The base URL that OCM uses to request tokens, stage by default") + fs.StringVar( + &c.TokenURL, "ocm-token-url", c.TokenURL, + "The base URL that OCM uses to request tokens, stage by default", + ) fs.BoolVar(&c.Debug, "ocm-debug", c.Debug, "Debug flag for OCM API") fs.BoolVar(&c.EnableMock, "enable-ocm-mock", c.EnableMock, "Enable mock ocm clients") } diff --git a/pkg/config/server.go b/pkg/config/server.go index 9258fd8..854c3ea 100755 --- a/pkg/config/server.go +++ b/pkg/config/server.go @@ -47,7 +47,10 @@ func (s *ServerConfig) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.HTTPSKeyFile, "https-key-file", s.HTTPSKeyFile, "The path to the tls.key file.") fs.BoolVar(&s.EnableHTTPS, "enable-https", s.EnableHTTPS, "Enable HTTPS rather than HTTP") fs.BoolVar(&s.EnableJWT, "enable-jwt", s.EnableJWT, "Enable JWT authentication validation") - fs.BoolVar(&s.EnableAuthz, "enable-authz", s.EnableAuthz, "Enable Authorization on endpoints, should only be disabled for debug") + fs.BoolVar( + &s.EnableAuthz, "enable-authz", s.EnableAuthz, + "Enable Authorization on endpoints, should only be disabled for debug", + ) fs.StringVar(&s.JwkCertFile, "jwk-cert-file", s.JwkCertFile, "JWK Certificate file") fs.StringVar(&s.JwkCertURL, "jwk-cert-url", s.JwkCertURL, "JWK Certificate URL") fs.StringVar(&s.ACLFile, "acl-file", s.ACLFile, "Access control list file") diff --git a/pkg/dao/adapter_status.go b/pkg/dao/adapter_status.go index c0f8c47..cdb56f3 100644 --- a/pkg/dao/adapter_status.go +++ b/pkg/dao/adapter_status.go @@ -22,8 +22,12 @@ type AdapterStatusDao interface { Upsert(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, error) Delete(ctx context.Context, id string) error FindByResource(ctx 
context.Context, resourceType, resourceID string) (api.AdapterStatusList, error) - FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, offset, limit int) (api.AdapterStatusList, int64, error) - FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, error) + FindByResourcePaginated( + ctx context.Context, resourceType, resourceID string, offset, limit int, + ) (api.AdapterStatusList, int64, error) + FindByResourceAndAdapter( + ctx context.Context, resourceType, resourceID, adapter string, + ) (*api.AdapterStatus, error) All(ctx context.Context) (api.AdapterStatusList, error) } @@ -46,7 +50,9 @@ func (d *sqlAdapterStatusDao) Get(ctx context.Context, id string) (*api.AdapterS return &adapterStatus, nil } -func (d *sqlAdapterStatusDao) Create(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, error) { +func (d *sqlAdapterStatusDao) Create( + ctx context.Context, adapterStatus *api.AdapterStatus, +) (*api.AdapterStatus, error) { g2 := (*d.sessionFactory).New(ctx) if err := g2.Omit(clause.Associations).Create(adapterStatus).Error; err != nil { db.MarkForRollback(ctx, err) @@ -55,7 +61,9 @@ func (d *sqlAdapterStatusDao) Create(ctx context.Context, adapterStatus *api.Ada return adapterStatus, nil } -func (d *sqlAdapterStatusDao) Replace(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, error) { +func (d *sqlAdapterStatusDao) Replace( + ctx context.Context, adapterStatus *api.AdapterStatus, +) (*api.AdapterStatus, error) { g2 := (*d.sessionFactory).New(ctx) if err := g2.Omit(clause.Associations).Save(adapterStatus).Error; err != nil { db.MarkForRollback(ctx, err) @@ -66,11 +74,15 @@ func (d *sqlAdapterStatusDao) Replace(ctx context.Context, adapterStatus *api.Ad // Upsert creates or updates an adapter status based on resource_type, resource_id, and adapter // This implements the upsert semantic required by the new API spec -func (d *sqlAdapterStatusDao) Upsert(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, error) { +func (d *sqlAdapterStatusDao) Upsert( + ctx context.Context, adapterStatus *api.AdapterStatus, +) (*api.AdapterStatus, error) { g2 := (*d.sessionFactory).New(ctx) // Try to find existing adapter status - existing, err := d.FindByResourceAndAdapter(ctx, adapterStatus.ResourceType, adapterStatus.ResourceID, adapterStatus.Adapter) + existing, err := d.FindByResourceAndAdapter( + ctx, adapterStatus.ResourceType, adapterStatus.ResourceID, adapterStatus.Adapter, + ) if err != nil { // If not found, create new @@ -107,23 +119,29 @@ func (d *sqlAdapterStatusDao) Upsert(ctx context.Context, adapterStatus *api.Ada func (d *sqlAdapterStatusDao) Delete(ctx context.Context, id string) error { g2 := (*d.sessionFactory).New(ctx) - if err := g2.Omit(clause.Associations).Delete(&api.AdapterStatus{Meta: api.Meta{ID: id}}).Error; err != nil { + adapterStatus := &api.AdapterStatus{Meta: api.Meta{ID: id}} + if err := g2.Omit(clause.Associations).Delete(adapterStatus).Error; err != nil { db.MarkForRollback(ctx, err) return err } return nil } -func (d *sqlAdapterStatusDao) FindByResource(ctx context.Context, resourceType, resourceID string) (api.AdapterStatusList, error) { +func (d *sqlAdapterStatusDao) FindByResource( + ctx context.Context, resourceType, resourceID string, +) (api.AdapterStatusList, error) { g2 := (*d.sessionFactory).New(ctx) statuses := api.AdapterStatusList{} - if err := g2.Where("resource_type = ? 
AND resource_id = ?", resourceType, resourceID).Find(&statuses).Error; err != nil { + query := g2.Where("resource_type = ? AND resource_id = ?", resourceType, resourceID) + if err := query.Find(&statuses).Error; err != nil { return nil, err } return statuses, nil } -func (d *sqlAdapterStatusDao) FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, offset, limit int) (api.AdapterStatusList, int64, error) { +func (d *sqlAdapterStatusDao) FindByResourcePaginated( + ctx context.Context, resourceType, resourceID string, offset, limit int, +) (api.AdapterStatusList, int64, error) { g2 := (*d.sessionFactory).New(ctx) statuses := api.AdapterStatusList{} var total int64 @@ -144,10 +162,13 @@ func (d *sqlAdapterStatusDao) FindByResourcePaginated(ctx context.Context, resou return statuses, total, nil } -func (d *sqlAdapterStatusDao) FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, error) { +func (d *sqlAdapterStatusDao) FindByResourceAndAdapter( + ctx context.Context, resourceType, resourceID, adapter string, +) (*api.AdapterStatus, error) { g2 := (*d.sessionFactory).New(ctx) var adapterStatus api.AdapterStatus - if err := g2.Where("resource_type = ? AND resource_id = ? AND adapter = ?", resourceType, resourceID, adapter).Take(&adapterStatus).Error; err != nil { + query := g2.Where("resource_type = ? AND resource_id = ? AND adapter = ?", resourceType, resourceID, adapter) + if err := query.Take(&adapterStatus).Error; err != nil { return nil, err } return &adapterStatus, nil diff --git a/pkg/db/migrations.go b/pkg/db/migrations.go index 230125a..63fa6e0 100755 --- a/pkg/db/migrations.go +++ b/pkg/db/migrations.go @@ -11,8 +11,9 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) -// gormigrate is a wrapper for gorm's migration functions that adds schema versioning and rollback capabilities. -// For help writing migration steps, see the gorm documentation on migrations: http://doc.gorm.io/database.html#migration +// gormigrate is a wrapper for gorm's migration functions that adds schema versioning +// and rollback capabilities. 
For help writing migration steps, see the gorm documentation +// on migrations: http://doc.gorm.io/database.html#migration func Migrate(g2 *gorm.DB) error { m := newGormigrate(g2) diff --git a/pkg/db/migrations/202511111044_add_clusters.go b/pkg/db/migrations/202511111044_add_clusters.go index 18cbeb4..e284aa3 100644 --- a/pkg/db/migrations/202511111044_add_clusters.go +++ b/pkg/db/migrations/202511111044_add_clusters.go @@ -54,7 +54,9 @@ func addClusters() *gormigrate.Migration { } // Create unique index on name (only for non-deleted records) - if err := tx.Exec("CREATE UNIQUE INDEX IF NOT EXISTS idx_clusters_name ON clusters(name) WHERE deleted_at IS NULL;").Error; err != nil { + createIndexSQL := "CREATE UNIQUE INDEX IF NOT EXISTS idx_clusters_name " + + "ON clusters(name) WHERE deleted_at IS NULL;" + if err := tx.Exec(createIndexSQL).Error; err != nil { return err } diff --git a/pkg/db/migrations/202511111055_add_node_pools.go b/pkg/db/migrations/202511111055_add_node_pools.go index a157193..c719c1d 100644 --- a/pkg/db/migrations/202511111055_add_node_pools.go +++ b/pkg/db/migrations/202511111055_add_node_pools.go @@ -52,7 +52,9 @@ func addNodePools() *gormigrate.Migration { } // Create index on deleted_at for soft deletes - if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_node_pools_deleted_at ON node_pools(deleted_at);").Error; err != nil { + createIdxSQL := "CREATE INDEX IF NOT EXISTS idx_node_pools_deleted_at " + + "ON node_pools(deleted_at);" + if err := tx.Exec(createIdxSQL).Error; err != nil { return err } diff --git a/pkg/db/migrations/202511111105_add_adapter_status.go b/pkg/db/migrations/202511111105_add_adapter_status.go index 947ba8c..3a35819 100644 --- a/pkg/db/migrations/202511111105_add_adapter_status.go +++ b/pkg/db/migrations/202511111105_add_adapter_status.go @@ -47,18 +47,24 @@ func addAdapterStatus() *gormigrate.Migration { } // Create index on deleted_at for soft deletes - if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_adapter_statuses_deleted_at ON adapter_statuses(deleted_at);").Error; err != nil { + createIdxDeletedSQL := "CREATE INDEX IF NOT EXISTS idx_adapter_statuses_deleted_at " + + "ON adapter_statuses(deleted_at);" + if err := tx.Exec(createIdxDeletedSQL).Error; err != nil { return err } // Create composite index on resource_type and resource_id for lookups - if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_adapter_statuses_resource ON adapter_statuses(resource_type, resource_id);").Error; err != nil { + createIdxResourceSQL := "CREATE INDEX IF NOT EXISTS idx_adapter_statuses_resource " + + "ON adapter_statuses(resource_type, resource_id);" + if err := tx.Exec(createIdxResourceSQL).Error; err != nil { return err } // Create unique index on resource_type, resource_id, and adapter // This ensures one adapter status per resource per adapter - if err := tx.Exec("CREATE UNIQUE INDEX IF NOT EXISTS idx_adapter_statuses_unique ON adapter_statuses(resource_type, resource_id, adapter) WHERE deleted_at IS NULL;").Error; err != nil { + createIdxUniqueSQL := "CREATE UNIQUE INDEX IF NOT EXISTS idx_adapter_statuses_unique " + + "ON adapter_statuses(resource_type, resource_id, adapter) WHERE deleted_at IS NULL;" + if err := tx.Exec(createIdxUniqueSQL).Error; err != nil { return err } diff --git a/pkg/db/migrations/migration_structs.go b/pkg/db/migrations/migration_structs.go index 2077fd7..00fe82e 100755 --- a/pkg/db/migrations/migration_structs.go +++ b/pkg/db/migrations/migration_structs.go @@ -9,8 +9,9 @@ import ( "github.com/go-gormigrate/gormigrate/v2" ) -// 
gormigrate is a wrapper for gorm's migration functions that adds schema versioning and rollback capabilities. -// For help writing migration steps, see the gorm documentation on migrations: http://doc.gorm.io/database.html#migration +// gormigrate is a wrapper for gorm's migration functions that adds schema versioning +// and rollback capabilities. For help writing migration steps, see the gorm documentation +// on migrations: http://doc.gorm.io/database.html#migration // MigrationList rules: // @@ -49,8 +50,9 @@ type fkMigration struct { } func CreateFK(g2 *gorm.DB, fks ...fkMigration) error { - var query = `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ON DELETE RESTRICT ON UPDATE RESTRICT;` - var drop = `ALTER TABLE %s DROP CONSTRAINT IF EXISTS %s;` + query := `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ` + + `ON DELETE RESTRICT ON UPDATE RESTRICT;` + drop := `ALTER TABLE %s DROP CONSTRAINT IF EXISTS %s;` for _, fk := range fks { name := fmt.Sprintf("fk_%s_%s", fk.Model, fk.Dest) diff --git a/pkg/db/sql_helpers.go b/pkg/db/sql_helpers.go index 920b355..fa6cced 100755 --- a/pkg/db/sql_helpers.go +++ b/pkg/db/sql_helpers.go @@ -24,7 +24,9 @@ func validateLabelKey(key string) *errors.ServiceError { } if !labelKeyPattern.MatchString(key) { - return errors.BadRequest("label key '%s' is invalid: must contain only lowercase letters, digits, and underscores", key) + return errors.BadRequest( + "label key '%s' is invalid: must contain only lowercase letters, digits, and underscores", key, + ) } return nil @@ -203,7 +205,9 @@ func conditionsNodeConverter(n tsl.Node) (interface{}, *errors.ServiceError) { // Validate condition type to prevent SQL injection if !conditionTypePattern.MatchString(conditionType) { - return nil, errors.BadRequest("condition type '%s' is invalid: must be PascalCase (e.g., Ready, Available)", conditionType) + return nil, errors.BadRequest( + "condition type '%s' is invalid: must be PascalCase (e.g., Ready, Available)", conditionType, + ) } // Get the right side value (the expected status) @@ -219,7 +223,9 @@ func conditionsNodeConverter(n tsl.Node) (interface{}, *errors.ServiceError) { // Validate condition status if !validConditionStatuses[rightStr] { - return nil, errors.BadRequest("condition status '%s' is invalid: must be True, False, or Unknown", rightStr) + return nil, errors.BadRequest( + "condition status '%s' is invalid: must be True, False, or Unknown", rightStr, + ) } // Only support equality operator for condition queries @@ -228,7 +234,8 @@ func conditionsNodeConverter(n tsl.Node) (interface{}, *errors.ServiceError) { } // Build query using jsonb_path_query_first to leverage BTREE expression index - // The index is: CREATE INDEX ... USING BTREE ((jsonb_path_query_first(status_conditions, '$[*] ? (@.type == "Ready")'))) + // The index is: CREATE INDEX ... USING BTREE + // ((jsonb_path_query_first(status_conditions, '$[*] ? (@.type == "Ready")'))) jsonPath := fmt.Sprintf(`$[*] ? (@.type == "%s")`, conditionType) return sq.Expr("jsonb_path_query_first(status_conditions, ?::jsonpath) ->> 'status' = ?", jsonPath, rightStr), nil @@ -244,12 +251,12 @@ type ConditionExpression struct { // This is necessary because the TSL library doesn't support JSONB containment operators. 
func ExtractConditionQueries(n tsl.Node, tableName string) (tsl.Node, []sq.Sqlizer, *errors.ServiceError) { var conditions []sq.Sqlizer - modifiedTree, err := extractConditionsWalk(n, tableName, &conditions) + modifiedTree, err := extractConditionsWalk(n, &conditions) return modifiedTree, conditions, err } // extractConditionsWalk recursively walks the tree and extracts condition queries -func extractConditionsWalk(n tsl.Node, tableName string, conditions *[]sq.Sqlizer) (tsl.Node, *errors.ServiceError) { +func extractConditionsWalk(n tsl.Node, conditions *[]sq.Sqlizer) (tsl.Node, *errors.ServiceError) { // Check if this node is a condition query if hasCondition(n) { expr, err := conditionsNodeConverter(n) @@ -274,7 +281,7 @@ func extractConditionsWalk(n tsl.Node, tableName string, conditions *[]sq.Sqlize if n.Left != nil { switch v := n.Left.(type) { case tsl.Node: - newLeftNode, err := extractConditionsWalk(v, tableName, conditions) + newLeftNode, err := extractConditionsWalk(v, conditions) if err != nil { return n, err } @@ -287,7 +294,7 @@ func extractConditionsWalk(n tsl.Node, tableName string, conditions *[]sq.Sqlize if n.Right != nil { switch v := n.Right.(type) { case tsl.Node: - newRightNode, err := extractConditionsWalk(v, tableName, conditions) + newRightNode, err := extractConditionsWalk(v, conditions) if err != nil { return n, err } @@ -295,7 +302,7 @@ func extractConditionsWalk(n tsl.Node, tableName string, conditions *[]sq.Sqlize case []tsl.Node: var newRightNodes []tsl.Node for _, rightNode := range v { - newRightNode, err := extractConditionsWalk(rightNode, tableName, conditions) + newRightNode, err := extractConditionsWalk(rightNode, conditions) if err != nil { return n, err } diff --git a/pkg/db/transactions.go b/pkg/db/transactions.go index 3a181d0..f59c884 100755 --- a/pkg/db/transactions.go +++ b/pkg/db/transactions.go @@ -11,7 +11,7 @@ import ( const defaultRollbackPolicy = false // newTransaction constructs a new Transaction object. 
-func newTransaction(ctx context.Context, connection SessionFactory) (*transaction.Transaction, error) { +func newTransaction(_ context.Context, connection SessionFactory) (*transaction.Transaction, error) { if connection == nil { // This happens in non-integration tests return nil, nil diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index 64b5025..5f997c4 100755 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -36,9 +36,9 @@ const ( CodeValidationRange = "HYPERFLEET-VAL-004" // Authentication errors (AUT) - 401 - CodeAuthNoCredentials = "HYPERFLEET-AUT-001" - CodeAuthInvalidCredentials = "HYPERFLEET-AUT-002" - CodeAuthExpiredToken = "HYPERFLEET-AUT-003" + CodeAuthNoCredentials = "HYPERFLEET-AUT-001" //nolint:gosec // Not actual credentials, just error code names + CodeAuthInvalidCredentials = "HYPERFLEET-AUT-002" //nolint:gosec // Not actual credentials, just error code names + CodeAuthExpiredToken = "HYPERFLEET-AUT-003" //nolint:gosec // Not actual credentials, just error code names // Authorization errors (AUZ) - 403 CodeAuthzInsufficient = "HYPERFLEET-AUZ-001" @@ -107,44 +107,97 @@ type errorDefinition struct { var errorDefinitions = map[string]errorDefinition{ // Authentication errors (AUT) - 401 - CodeAuthNoCredentials: {ErrorTypeAuth, "Authentication Required", "Account authentication could not be verified", http.StatusUnauthorized}, - CodeAuthInvalidCredentials: {ErrorTypeAuth, "Invalid Credentials", "The provided credentials are invalid", http.StatusUnauthorized}, - CodeAuthExpiredToken: {ErrorTypeAuth, "Invalid Token", "Invalid token provided", http.StatusUnauthorized}, + CodeAuthNoCredentials: { + ErrorTypeAuth, "Authentication Required", + "Account authentication could not be verified", http.StatusUnauthorized, + }, + CodeAuthInvalidCredentials: { + ErrorTypeAuth, "Invalid Credentials", "The provided credentials are invalid", http.StatusUnauthorized, + }, + CodeAuthExpiredToken: { + ErrorTypeAuth, "Invalid Token", "Invalid token provided", http.StatusUnauthorized, + }, // Authorization errors (AUZ) - 403 - CodeAuthzInsufficient: {ErrorTypeAuthz, "Unauthorized", "Account is unauthorized to perform this action", http.StatusForbidden}, - CodeAuthzForbidden: {ErrorTypeAuthz, "Forbidden", "Forbidden to perform this action", http.StatusForbidden}, + CodeAuthzInsufficient: { + ErrorTypeAuthz, "Unauthorized", "Account is unauthorized to perform this action", http.StatusForbidden, + }, + CodeAuthzForbidden: { + ErrorTypeAuthz, "Forbidden", "Forbidden to perform this action", http.StatusForbidden, + }, // Validation errors (VAL) - 400 - CodeValidationMultiple: {ErrorTypeValidation, "Validation Failed", "Multiple validation errors occurred", http.StatusBadRequest}, - CodeValidationRequired: {ErrorTypeValidation, "Required Field Missing", "A required field is missing", http.StatusBadRequest}, - CodeValidationInvalid: {ErrorTypeValidation, "Invalid Value", "The provided value is invalid", http.StatusBadRequest}, - CodeValidationFormat: {ErrorTypeValidation, "Invalid Format", "The provided value has an invalid format", http.StatusBadRequest}, - CodeValidationRange: {ErrorTypeValidation, "Value Out of Range", "The provided value is out of the allowed range", http.StatusBadRequest}, - CodeBadRequest: {ErrorTypeBadRequest, "Bad Request", "Bad request", http.StatusBadRequest}, - CodeMalformedBody: {ErrorTypeMalformed, "Malformed Request", "Unable to read request body", http.StatusBadRequest}, - CodeSearchParseFail: {ErrorTypeValidation, "Invalid Search Query", "Failed to parse 
search query", http.StatusBadRequest}, + CodeValidationMultiple: { + ErrorTypeValidation, "Validation Failed", "Multiple validation errors occurred", http.StatusBadRequest, + }, + CodeValidationRequired: { + ErrorTypeValidation, "Required Field Missing", "A required field is missing", http.StatusBadRequest, + }, + CodeValidationInvalid: { + ErrorTypeValidation, "Invalid Value", "The provided value is invalid", http.StatusBadRequest, + }, + CodeValidationFormat: { + ErrorTypeValidation, "Invalid Format", "The provided value has an invalid format", http.StatusBadRequest, + }, + CodeValidationRange: { + ErrorTypeValidation, "Value Out of Range", + "The provided value is out of the allowed range", http.StatusBadRequest, + }, + CodeBadRequest: { + ErrorTypeBadRequest, "Bad Request", "Bad request", http.StatusBadRequest, + }, + CodeMalformedBody: { + ErrorTypeMalformed, "Malformed Request", "Unable to read request body", http.StatusBadRequest, + }, + CodeSearchParseFail: { + ErrorTypeValidation, "Invalid Search Query", "Failed to parse search query", http.StatusBadRequest, + }, // Not Found errors (NTF) - 404 - CodeNotFoundGeneric: {ErrorTypeNotFound, "Resource Not Found", "Resource not found", http.StatusNotFound}, - CodeNotFoundCluster: {ErrorTypeNotFound, "Cluster Not Found", "The specified cluster was not found", http.StatusNotFound}, - CodeNotFoundNodePool: {ErrorTypeNotFound, "NodePool Not Found", "The specified node pool was not found", http.StatusNotFound}, + CodeNotFoundGeneric: { + ErrorTypeNotFound, "Resource Not Found", "Resource not found", http.StatusNotFound, + }, + CodeNotFoundCluster: { + ErrorTypeNotFound, "Cluster Not Found", "The specified cluster was not found", http.StatusNotFound, + }, + CodeNotFoundNodePool: { + ErrorTypeNotFound, "NodePool Not Found", "The specified node pool was not found", http.StatusNotFound, + }, // Conflict errors (CNF) - 409 - CodeConflictExists: {ErrorTypeConflict, "Resource Conflict", "An entity with the specified unique values already exists", http.StatusConflict}, - CodeConflictVersion: {ErrorTypeConflict, "Version Conflict", "The resource version does not match", http.StatusConflict}, + CodeConflictExists: { + ErrorTypeConflict, "Resource Conflict", + "An entity with the specified unique values already exists", http.StatusConflict, + }, + CodeConflictVersion: { + ErrorTypeConflict, "Version Conflict", "The resource version does not match", http.StatusConflict, + }, // Rate Limit errors (LMT) - 429 - CodeRateLimitExceeded: {ErrorTypeRateLimit, "Rate Limit Exceeded", "Too many requests, please try again later", http.StatusTooManyRequests}, + CodeRateLimitExceeded: { + ErrorTypeRateLimit, "Rate Limit Exceeded", + "Too many requests, please try again later", http.StatusTooManyRequests, + }, // Internal errors (INT) - 500/501 - CodeInternalGeneral: {ErrorTypeInternal, "Internal Server Error", "Unspecified error", http.StatusInternalServerError}, - CodeInternalDatabase: {ErrorTypeInternal, "Database Error", "A database error occurred", http.StatusInternalServerError}, - CodeNotImplemented: {ErrorTypeNotImpl, "Not Implemented", "Functionality not implemented", http.StatusNotImplemented}, + CodeInternalGeneral: { + ErrorTypeInternal, "Internal Server Error", "Unspecified error", http.StatusInternalServerError, + }, + CodeInternalDatabase: { + ErrorTypeInternal, "Database Error", "A database error occurred", http.StatusInternalServerError, + }, + CodeNotImplemented: { + ErrorTypeNotImpl, "Not Implemented", "Functionality not implemented", 
http.StatusNotImplemented, + }, // Service errors (SVC) - 503/504 - CodeServiceUnavailable: {ErrorTypeService, "Service Unavailable", "The service is temporarily unavailable", http.StatusServiceUnavailable}, - CodeServiceTimeout: {ErrorTypeService, "Service Timeout", "The service request timed out", http.StatusGatewayTimeout}, + CodeServiceUnavailable: { + ErrorTypeService, "Service Unavailable", + "The service is temporarily unavailable", http.StatusServiceUnavailable, + }, + CodeServiceTimeout: { + ErrorTypeService, "Service Timeout", "The service request timed out", http.StatusGatewayTimeout, + }, } // Find looks up an error definition by its RFC 9457 code diff --git a/pkg/handlers/cluster_nodepools.go b/pkg/handlers/cluster_nodepools.go index 5d79fa2..03347a6 100644 --- a/pkg/handlers/cluster_nodepools.go +++ b/pkg/handlers/cluster_nodepools.go @@ -18,7 +18,11 @@ type clusterNodePoolsHandler struct { generic services.GenericService } -func NewClusterNodePoolsHandler(clusterService services.ClusterService, nodePoolService services.NodePoolService, generic services.GenericService) *clusterNodePoolsHandler { +func NewClusterNodePoolsHandler( + clusterService services.ClusterService, + nodePoolService services.NodePoolService, + generic services.GenericService, +) *clusterNodePoolsHandler { return &clusterNodePoolsHandler{ clusterService: clusterService, nodePoolService: nodePoolService, diff --git a/pkg/handlers/cluster_nodepools_test.go b/pkg/handlers/cluster_nodepools_test.go index 952ea89..90b4f2f 100644 --- a/pkg/handlers/cluster_nodepools_test.go +++ b/pkg/handlers/cluster_nodepools_test.go @@ -28,7 +28,9 @@ func TestClusterNodePoolsHandler_Get(t *testing.T) { name string clusterID string nodePoolID string - setupMocks func(ctrl *gomock.Controller) (*services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService) + setupMocks func(ctrl *gomock.Controller) ( + *services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService, + ) expectedStatusCode int expectedError bool }{ @@ -36,7 +38,9 @@ func TestClusterNodePoolsHandler_Get(t *testing.T) { name: "Success - Get nodepool by cluster and nodepool ID", clusterID: clusterID, nodePoolID: nodePoolID, - setupMocks: func(ctrl *gomock.Controller) (*services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService) { + setupMocks: func(ctrl *gomock.Controller) ( + *services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService, + ) { mockClusterSvc := services.NewMockClusterService(ctrl) mockNodePoolSvc := services.NewMockNodePoolService(ctrl) mockGenericSvc := services.NewMockGenericService(ctrl) @@ -75,7 +79,9 @@ func TestClusterNodePoolsHandler_Get(t *testing.T) { name: "Error - Cluster not found", clusterID: "non-existent", nodePoolID: nodePoolID, - setupMocks: func(ctrl *gomock.Controller) (*services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService) { + setupMocks: func(ctrl *gomock.Controller) ( + *services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService, + ) { mockClusterSvc := services.NewMockClusterService(ctrl) mockNodePoolSvc := services.NewMockNodePoolService(ctrl) mockGenericSvc := services.NewMockGenericService(ctrl) @@ -91,7 +97,9 @@ func TestClusterNodePoolsHandler_Get(t *testing.T) { name: "Error - NodePool not found", clusterID: clusterID, nodePoolID: "non-existent", - setupMocks: func(ctrl *gomock.Controller) (*services.MockClusterService, 
*services.MockNodePoolService, *services.MockGenericService) { + setupMocks: func(ctrl *gomock.Controller) ( + *services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService, + ) { mockClusterSvc := services.NewMockClusterService(ctrl) mockNodePoolSvc := services.NewMockNodePoolService(ctrl) mockGenericSvc := services.NewMockGenericService(ctrl) @@ -116,7 +124,9 @@ func TestClusterNodePoolsHandler_Get(t *testing.T) { name: "Error - NodePool belongs to different cluster", clusterID: clusterID, nodePoolID: nodePoolID, - setupMocks: func(ctrl *gomock.Controller) (*services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService) { + setupMocks: func(ctrl *gomock.Controller) ( + *services.MockClusterService, *services.MockNodePoolService, *services.MockGenericService, + ) { mockClusterSvc := services.NewMockClusterService(ctrl) mockNodePoolSvc := services.NewMockNodePoolService(ctrl) mockGenericSvc := services.NewMockGenericService(ctrl) @@ -163,7 +173,8 @@ func TestClusterNodePoolsHandler_Get(t *testing.T) { handler := NewClusterNodePoolsHandler(mockClusterSvc, mockNodePoolSvc, mockGenericSvc) // Create request - req := httptest.NewRequest(http.MethodGet, "/api/hyperfleet/v1/clusters/"+tt.clusterID+"/nodepools/"+tt.nodePoolID, nil) + reqURL := "/api/hyperfleet/v1/clusters/" + tt.clusterID + "/nodepools/" + tt.nodePoolID + req := httptest.NewRequest(http.MethodGet, reqURL, nil) req = mux.SetURLVars(req, map[string]string{ "id": tt.clusterID, "nodepool_id": tt.nodePoolID, diff --git a/pkg/handlers/cluster_status.go b/pkg/handlers/cluster_status.go index c7636b9..9a7a363 100644 --- a/pkg/handlers/cluster_status.go +++ b/pkg/handlers/cluster_status.go @@ -16,7 +16,10 @@ type clusterStatusHandler struct { clusterService services.ClusterService } -func NewClusterStatusHandler(adapterStatusService services.AdapterStatusService, clusterService services.ClusterService) *clusterStatusHandler { +func NewClusterStatusHandler( + adapterStatusService services.AdapterStatusService, + clusterService services.ClusterService, +) *clusterStatusHandler { return &clusterStatusHandler{ adapterStatusService: adapterStatusService, clusterService: clusterService, diff --git a/pkg/handlers/nodepool_status.go b/pkg/handlers/nodepool_status.go index 0c1fffe..e6ded0c 100644 --- a/pkg/handlers/nodepool_status.go +++ b/pkg/handlers/nodepool_status.go @@ -17,7 +17,10 @@ type nodePoolStatusHandler struct { nodePoolService services.NodePoolService } -func NewNodePoolStatusHandler(adapterStatusService services.AdapterStatusService, nodePoolService services.NodePoolService) *nodePoolStatusHandler { +func NewNodePoolStatusHandler( + adapterStatusService services.AdapterStatusService, + nodePoolService services.NodePoolService, +) *nodePoolStatusHandler { return &nodePoolStatusHandler{ adapterStatusService: adapterStatusService, nodePoolService: nodePoolService, diff --git a/pkg/handlers/validation.go b/pkg/handlers/validation.go index 097ea1e..4a9bdfc 100755 --- a/pkg/handlers/validation.go +++ b/pkg/handlers/validation.go @@ -44,6 +44,8 @@ func validateEmpty(i interface{}, fieldName string, field string) validate { } // validateName validates that a name field matches the pattern ^[a-z0-9-]+$ and length constraints +// +//nolint:unparam // fieldName is kept as parameter for flexibility even though currently only "Name" is used func validateName(i interface{}, fieldName string, field string, minLen, maxLen int) validate { return func() *errors.ServiceError { value := 
reflect.ValueOf(i).Elem().FieldByName(fieldName) @@ -73,7 +75,10 @@ func validateName(i interface{}, fieldName string, field string, minLen, maxLen // Check pattern: lowercase alphanumeric and hyphens only if !namePattern.MatchString(name) { - return errors.Validation("%s must start and end with lowercase letter or number, and contain only lowercase letters, numbers, and hyphens", field) + return errors.Validation( + "%s must start and end with lowercase letter or number, and contain only "+ + "lowercase letters, numbers, and hyphens", field, + ) } return nil @@ -81,6 +86,8 @@ func validateName(i interface{}, fieldName string, field string, minLen, maxLen } // validateKind validates that the kind field matches the expected value +// +//nolint:unparam // fieldName is kept as parameter for flexibility even though currently only "Kind" is used func validateKind(i interface{}, fieldName string, field string, expectedKind string) validate { return func() *errors.ServiceError { value := reflect.ValueOf(i).Elem().FieldByName(fieldName) diff --git a/pkg/logger/gorm_logger.go b/pkg/logger/gorm_logger.go index 8e60dc0..407995f 100644 --- a/pkg/logger/gorm_logger.go +++ b/pkg/logger/gorm_logger.go @@ -46,7 +46,12 @@ func (l *GormLogger) Error(ctx context.Context, msg string, data ...interface{}) } } -func (l *GormLogger) Trace(ctx context.Context, begin time.Time, fc func() (sql string, rowsAffected int64), err error) { +func (l *GormLogger) Trace( + ctx context.Context, + begin time.Time, + fc func() (sql string, rowsAffected int64), + err error, +) { if l.logLevel <= gormlogger.Silent { return } diff --git a/pkg/logger/logger_test.go b/pkg/logger/logger_test.go index 9b873a8..6b157cb 100644 --- a/pkg/logger/logger_test.go +++ b/pkg/logger/logger_test.go @@ -10,6 +10,10 @@ import ( "testing" ) +const ( + testMessage = "test message" +) + // TestParseLogLevel tests log level parsing with various inputs func TestParseLogLevel(t *testing.T) { tests := []struct { @@ -147,7 +151,7 @@ func TestBasicLogFormat(t *testing.T) { InitGlobalLogger(cfg) ctx := context.Background() - With(ctx, "key", "value").Info("test message") + With(ctx, "key", "value").Info(testMessage) output := buf.String() if output == "" { @@ -166,8 +170,8 @@ func TestBasicLogFormat(t *testing.T) { } // Check required fields - if logEntry["message"] != "test message" { - t.Errorf("expected message 'test message', got %v", logEntry["message"]) + if logEntry["message"] != testMessage { + t.Errorf("expected message %q, got %v", testMessage, logEntry["message"]) } if logEntry["level"] != "info" { t.Errorf("expected level 'info', got %v", logEntry["level"]) @@ -186,8 +190,8 @@ func TestBasicLogFormat(t *testing.T) { } } else { // Text format - check for stable invariants only - if !strings.Contains(output, "test message") { - t.Errorf("expected output to contain 'test message', got: %s", output) + if !strings.Contains(output, testMessage) { + t.Errorf("expected output to contain %q, got: %s", testMessage, output) } // Check for log level (case-insensitive) outputLower := strings.ToLower(output) @@ -373,10 +377,8 @@ func TestStackTrace(t *testing.T) { if len(stackTrace) == 0 { t.Error("expected stack_trace to have at least one frame") } - } else { - if logEntry["stack_trace"] != nil { - t.Error("expected no stack_trace field for non-error level") - } + } else if logEntry["stack_trace"] != nil { + t.Error("expected no stack_trace field for non-error level") } }) } @@ -552,10 +554,8 @@ func TestWithError(t *testing.T) { if logEntry["error"] != 
tt.expectedValue { t.Errorf("expected error '%s', got %v", tt.expectedValue, logEntry["error"]) } - } else { - if logEntry["error"] != nil { - t.Errorf("expected no error field for nil error, got %v", logEntry["error"]) - } + } else if logEntry["error"] != nil { + t.Errorf("expected no error field for nil error, got %v", logEntry["error"]) } }) } diff --git a/pkg/logger/text_handler.go b/pkg/logger/text_handler.go index b69fd14..d0afd46 100644 --- a/pkg/logger/text_handler.go +++ b/pkg/logger/text_handler.go @@ -27,7 +27,9 @@ type HyperFleetTextHandler struct { } // NewHyperFleetTextHandler creates a new text handler conforming to HyperFleet Logging Specification -func NewHyperFleetTextHandler(w io.Writer, component, version, hostname string, level slog.Level) *HyperFleetTextHandler { +func NewHyperFleetTextHandler( + w io.Writer, component, version, hostname string, level slog.Level, +) *HyperFleetTextHandler { return &HyperFleetTextHandler{ w: w, component: component, diff --git a/pkg/logger/text_handler_test.go b/pkg/logger/text_handler_test.go index fed1ebd..becb04c 100644 --- a/pkg/logger/text_handler_test.go +++ b/pkg/logger/text_handler_test.go @@ -95,7 +95,9 @@ func TestHyperFleetTextHandler_SpecialCharacters(t *testing.T) { } // Value with internal quotes should be escaped and quoted - if !strings.Contains(output, `with_quotes="contains \"quotes\""`) && !strings.Contains(output, `with_quotes="contains \\\"quotes\\\""`) { + hasQuotes := strings.Contains(output, `with_quotes="contains \"quotes\""`) || + strings.Contains(output, `with_quotes="contains \\\"quotes\\\""`) + if !hasQuotes { t.Errorf("expected escaped quotes, got: %s", output) } } @@ -109,10 +111,26 @@ func TestHyperFleetTextHandler_LogLevels(t *testing.T) { expectedLevel string shouldLog bool }{ - {"DEBUG enabled", slog.LevelDebug, func(l *slog.Logger, ctx context.Context, msg string) { l.DebugContext(ctx, msg) }, "DEBUG", true}, - {"INFO enabled", slog.LevelInfo, func(l *slog.Logger, ctx context.Context, msg string) { l.InfoContext(ctx, msg) }, "INFO", true}, - {"WARN enabled", slog.LevelWarn, func(l *slog.Logger, ctx context.Context, msg string) { l.WarnContext(ctx, msg) }, "WARN", true}, - {"ERROR enabled", slog.LevelError, func(l *slog.Logger, ctx context.Context, msg string) { l.ErrorContext(ctx, msg) }, "ERROR", true}, + { + "DEBUG enabled", slog.LevelDebug, + func(l *slog.Logger, ctx context.Context, msg string) { l.DebugContext(ctx, msg) }, + "DEBUG", true, + }, + { + "INFO enabled", slog.LevelInfo, + func(l *slog.Logger, ctx context.Context, msg string) { l.InfoContext(ctx, msg) }, + "INFO", true, + }, + { + "WARN enabled", slog.LevelWarn, + func(l *slog.Logger, ctx context.Context, msg string) { l.WarnContext(ctx, msg) }, + "WARN", true, + }, + { + "ERROR enabled", slog.LevelError, + func(l *slog.Logger, ctx context.Context, msg string) { l.ErrorContext(ctx, msg) }, + "ERROR", true, + }, } for _, tt := range tests { diff --git a/pkg/middleware/masking.go b/pkg/middleware/masking.go index a5ff312..d8ec724 100644 --- a/pkg/middleware/masking.go +++ b/pkg/middleware/masking.go @@ -30,7 +30,9 @@ var ( emailPattern = regexp.MustCompile(`[a-zA-Z0-9._%+\-]+@[a-zA-Z0-9.\-]+\.[a-zA-Z]{2,}`) creditCardPattern = regexp.MustCompile(`\b\d{4}[\s\-]?\d{4}[\s\-]?\d{4}[\s\-]?\d{4}\b`) // Matches common API key/token formats: Bearer tokens, API keys, etc. 
- apiKeyPattern = regexp.MustCompile(`(?i)(bearer\s+|api[_\-]?key[\s:=]+|token[\s:=]+|secret[\s:=]+|password[\s:=]+)[a-zA-Z0-9\-_\.+/=]+`) + apiKeyPattern = regexp.MustCompile( + `(?i)(bearer\s+|api[_\-]?key[\s:=]+|token[\s:=]+|secret[\s:=]+|password[\s:=]+)[a-zA-Z0-9\-_\.+/=]+`, + ) // Matches form-encoded sensitive key-values formEncodedPattern = regexp.MustCompile(`(?i)(password|token|secret|api[_\-]?key)=[^&\s]+`) ) @@ -115,7 +117,8 @@ func (m *MaskingMiddleware) MaskBody(body []byte) []byte { var data interface{} if err := json.Unmarshal(body, &data); err != nil { // JSON parsing failed - use text-based fallback masking to prevent leakage - logger.WithError(context.Background(), err).Warn("JSON parsing failed in MaskBody, applying text-based fallback masking") + logger.WithError(context.Background(), err). + Warn("JSON parsing failed in MaskBody, applying text-based fallback masking") return m.maskTextFallback(body) } @@ -124,7 +127,8 @@ func (m *MaskingMiddleware) MaskBody(body []byte) []byte { masked, err := json.Marshal(data) if err != nil { // JSON marshaling failed - use text-based fallback masking to prevent leakage - logger.WithError(context.Background(), err).Warn("JSON marshaling failed in MaskBody, applying text-based fallback masking") + logger.WithError(context.Background(), err). + Warn("JSON marshaling failed in MaskBody, applying text-based fallback masking") return m.maskTextFallback(body) } return masked diff --git a/pkg/middleware/masking_test.go b/pkg/middleware/masking_test.go index 0810f65..719ef78 100644 --- a/pkg/middleware/masking_test.go +++ b/pkg/middleware/masking_test.go @@ -96,7 +96,10 @@ func TestMaskHeaders(t *testing.T) { if resultValues, ok := result[key]; !ok { t.Errorf("expected header %s not found in result", key) } else if len(resultValues) != len(expectedValues) { - t.Errorf("header %s length = %d, want %d (values: %v vs %v)", key, len(resultValues), len(expectedValues), resultValues, expectedValues) + t.Errorf( + "header %s length = %d, want %d (values: %v vs %v)", + key, len(resultValues), len(expectedValues), resultValues, expectedValues, + ) } else { // Compare all values in the slice for i := range expectedValues { @@ -158,7 +161,8 @@ func TestMaskBody(t *testing.T) { name: "mask multiple sensitive fields", enabled: true, body: `{"password":"pass","secret":"sec","token":"tok","api_key":"key","normal":"value"}`, - expected: `{"api_key":"***REDACTED***","normal":"value","password":"***REDACTED***","secret":"***REDACTED***","token":"***REDACTED***"}`, + expected: `{"api_key":"***REDACTED***","normal":"value",` + + `"password":"***REDACTED***","secret":"***REDACTED***","token":"***REDACTED***"}`, }, { name: "case insensitive field matching", @@ -241,7 +245,8 @@ func TestMaskBody(t *testing.T) { result := m.MaskBody([]byte(tt.body)) // For JSON objects, compare as maps to handle key ordering - if tt.body != "" && tt.body[0] == '{' { + switch { + case tt.body != "" && tt.body[0] == '{': var resultMap, expectedMap map[string]interface{} if err := json.Unmarshal(result, &resultMap); err != nil { t.Fatalf("failed to unmarshal result: %v", err) @@ -254,7 +259,7 @@ func TestMaskBody(t *testing.T) { if !deepEqual(resultMap, expectedMap) { t.Errorf("MaskBody() = %s, want %s", result, tt.expected) } - } else if tt.body != "" && tt.body[0] == '[' { + case tt.body != "" && tt.body[0] == '[': // For JSON arrays, compare as slices var resultArray, expectedArray []interface{} if err := json.Unmarshal(result, &resultArray); err != nil { @@ -268,7 +273,7 @@ func 
TestMaskBody(t *testing.T) { if !deepEqualSlice(resultArray, expectedArray) { t.Errorf("MaskBody() = %s, want %s", result, tt.expected) } - } else { + default: // For non-JSON, compare as strings if string(result) != tt.expected { t.Errorf("MaskBody() = %s, want %s", result, tt.expected) diff --git a/pkg/middleware/schema_validation_test.go b/pkg/middleware/schema_validation_test.go index 27e3908..811f85e 100644 --- a/pkg/middleware/schema_validation_test.go +++ b/pkg/middleware/schema_validation_test.go @@ -379,7 +379,7 @@ func TestSchemaValidationMiddleware_MalformedJSON(t *testing.T) { func setupTestValidator(t *testing.T) *validators.SchemaValidator { tmpDir := t.TempDir() schemaPath := filepath.Join(tmpDir, "test-schema.yaml") - err := os.WriteFile(schemaPath, []byte(testSchema), 0644) + err := os.WriteFile(schemaPath, []byte(testSchema), 0600) if err != nil { t.Fatalf("Failed to create test schema: %v", err) } diff --git a/pkg/services/adapter_status.go b/pkg/services/adapter_status.go index e496be5..f566909 100644 --- a/pkg/services/adapter_status.go +++ b/pkg/services/adapter_status.go @@ -16,9 +16,15 @@ type AdapterStatusService interface { Replace(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) Upsert(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) Delete(ctx context.Context, id string) *errors.ServiceError - FindByResource(ctx context.Context, resourceType, resourceID string) (api.AdapterStatusList, *errors.ServiceError) - FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, listArgs *ListArguments) (api.AdapterStatusList, int64, *errors.ServiceError) - FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, *errors.ServiceError) + FindByResource( + ctx context.Context, resourceType, resourceID string, + ) (api.AdapterStatusList, *errors.ServiceError) + FindByResourcePaginated( + ctx context.Context, resourceType, resourceID string, listArgs *ListArguments, + ) (api.AdapterStatusList, int64, *errors.ServiceError) + FindByResourceAndAdapter( + ctx context.Context, resourceType, resourceID, adapter string, + ) (*api.AdapterStatus, *errors.ServiceError) All(ctx context.Context) (api.AdapterStatusList, *errors.ServiceError) } @@ -42,7 +48,9 @@ func (s *sqlAdapterStatusService) Get(ctx context.Context, id string) (*api.Adap return adapterStatus, nil } -func (s *sqlAdapterStatusService) Create(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) { +func (s *sqlAdapterStatusService) Create( + ctx context.Context, adapterStatus *api.AdapterStatus, +) (*api.AdapterStatus, *errors.ServiceError) { adapterStatus, err := s.adapterStatusDao.Create(ctx, adapterStatus) if err != nil { return nil, handleCreateError("AdapterStatus", err) @@ -50,7 +58,9 @@ func (s *sqlAdapterStatusService) Create(ctx context.Context, adapterStatus *api return adapterStatus, nil } -func (s *sqlAdapterStatusService) Replace(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) { +func (s *sqlAdapterStatusService) Replace( + ctx context.Context, adapterStatus *api.AdapterStatus, +) (*api.AdapterStatus, *errors.ServiceError) { adapterStatus, err := s.adapterStatusDao.Replace(ctx, adapterStatus) if err != nil { return nil, handleUpdateError("AdapterStatus", err) @@ -58,7 +68,9 @@ func (s *sqlAdapterStatusService) Replace(ctx context.Context, adapterStatus 
*ap return adapterStatus, nil } -func (s *sqlAdapterStatusService) Upsert(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) { +func (s *sqlAdapterStatusService) Upsert( + ctx context.Context, adapterStatus *api.AdapterStatus, +) (*api.AdapterStatus, *errors.ServiceError) { adapterStatus, err := s.adapterStatusDao.Upsert(ctx, adapterStatus) if err != nil { return nil, handleCreateError("AdapterStatus", err) @@ -73,7 +85,9 @@ func (s *sqlAdapterStatusService) Delete(ctx context.Context, id string) *errors return nil } -func (s *sqlAdapterStatusService) FindByResource(ctx context.Context, resourceType, resourceID string) (api.AdapterStatusList, *errors.ServiceError) { +func (s *sqlAdapterStatusService) FindByResource( + ctx context.Context, resourceType, resourceID string, +) (api.AdapterStatusList, *errors.ServiceError) { statuses, err := s.adapterStatusDao.FindByResource(ctx, resourceType, resourceID) if err != nil { return nil, errors.GeneralError("Unable to get adapter statuses: %s", err) @@ -81,7 +95,9 @@ func (s *sqlAdapterStatusService) FindByResource(ctx context.Context, resourceTy return statuses, nil } -func (s *sqlAdapterStatusService) FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, listArgs *ListArguments) (api.AdapterStatusList, int64, *errors.ServiceError) { +func (s *sqlAdapterStatusService) FindByResourcePaginated( + ctx context.Context, resourceType, resourceID string, listArgs *ListArguments, +) (api.AdapterStatusList, int64, *errors.ServiceError) { offset := (listArgs.Page - 1) * int(listArgs.Size) limit := int(listArgs.Size) @@ -93,7 +109,9 @@ func (s *sqlAdapterStatusService) FindByResourcePaginated(ctx context.Context, r return statuses, total, nil } -func (s *sqlAdapterStatusService) FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, *errors.ServiceError) { +func (s *sqlAdapterStatusService) FindByResourceAndAdapter( + ctx context.Context, resourceType, resourceID, adapter string, +) (*api.AdapterStatus, *errors.ServiceError) { status, err := s.adapterStatusDao.FindByResourceAndAdapter(ctx, resourceType, resourceID, adapter) if err != nil { return nil, handleGetError("AdapterStatus", "adapter", adapter, err) diff --git a/pkg/services/cluster.go b/pkg/services/cluster.go index d969379..9d85135 100644 --- a/pkg/services/cluster.go +++ b/pkg/services/cluster.go @@ -32,14 +32,20 @@ type ClusterService interface { // ProcessAdapterStatus handles the business logic for adapter status: // - If Available condition is "Unknown": returns (nil, nil) indicating no-op // - Otherwise: upserts the status and triggers aggregation - ProcessAdapterStatus(ctx context.Context, clusterID string, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) + ProcessAdapterStatus( + ctx context.Context, clusterID string, adapterStatus *api.AdapterStatus, + ) (*api.AdapterStatus, *errors.ServiceError) // idempotent functions for the control plane, but can also be called synchronously by any actor OnUpsert(ctx context.Context, id string) error OnDelete(ctx context.Context, id string) error } -func NewClusterService(clusterDao dao.ClusterDao, adapterStatusDao dao.AdapterStatusDao, adapterConfig *config.AdapterRequirementsConfig) ClusterService { +func NewClusterService( + clusterDao dao.ClusterDao, + adapterStatusDao dao.AdapterStatusDao, + adapterConfig *config.AdapterRequirementsConfig, +) ClusterService { return &sqlClusterService{ clusterDao: 
clusterDao, adapterStatusDao: adapterStatusDao, @@ -136,7 +142,9 @@ func (s *sqlClusterService) OnDelete(ctx context.Context, id string) error { } // UpdateClusterStatusFromAdapters aggregates adapter statuses into cluster status -func (s *sqlClusterService) UpdateClusterStatusFromAdapters(ctx context.Context, clusterID string) (*api.Cluster, *errors.ServiceError) { +func (s *sqlClusterService) UpdateClusterStatusFromAdapters( + ctx context.Context, clusterID string, +) (*api.Cluster, *errors.ServiceError) { // Get the cluster cluster, err := s.clusterDao.Get(ctx, clusterID) if err != nil { @@ -231,8 +239,12 @@ func (s *sqlClusterService) UpdateClusterStatusFromAdapters(ctx context.Context, // ProcessAdapterStatus handles the business logic for adapter status: // - If Available condition is "Unknown": returns (nil, nil) indicating no-op // - Otherwise: upserts the status and triggers aggregation -func (s *sqlClusterService) ProcessAdapterStatus(ctx context.Context, clusterID string, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) { - existingStatus, findErr := s.adapterStatusDao.FindByResourceAndAdapter(ctx, "Cluster", clusterID, adapterStatus.Adapter) +func (s *sqlClusterService) ProcessAdapterStatus( + ctx context.Context, clusterID string, adapterStatus *api.AdapterStatus, +) (*api.AdapterStatus, *errors.ServiceError) { + existingStatus, findErr := s.adapterStatusDao.FindByResourceAndAdapter( + ctx, "Cluster", clusterID, adapterStatus.Adapter, + ) if findErr != nil && !stderrors.Is(findErr, gorm.ErrRecordNotFound) { if !strings.Contains(findErr.Error(), errors.CodeNotFoundGeneric) { return nil, errors.GeneralError("Failed to get adapter status: %s", findErr) @@ -275,7 +287,9 @@ func (s *sqlClusterService) ProcessAdapterStatus(ctx context.Context, clusterID // If the adapter status doesn't include Available (e.g. it only reports Ready/Progressing), // saving it should not overwrite the cluster's synthetic Available/Ready conditions. 
if hasAvailableCondition { - if _, aggregateErr := s.UpdateClusterStatusFromAdapters(ctx, clusterID); aggregateErr != nil { + if _, aggregateErr := s.UpdateClusterStatusFromAdapters( + ctx, clusterID, + ); aggregateErr != nil { // Log error but don't fail the request - the status will be computed on next update ctx = logger.WithClusterID(ctx, clusterID) logger.WithError(ctx, aggregateErr).Warn("Failed to aggregate cluster status") diff --git a/pkg/services/cluster_test.go b/pkg/services/cluster_test.go index 0948754..4c9f436 100644 --- a/pkg/services/cluster_test.go +++ b/pkg/services/cluster_test.go @@ -14,6 +14,10 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" ) +const ( + testClusterID = "test-cluster-id" +) + // Mock implementations for testing ProcessAdapterStatus type mockClusterDao struct { @@ -107,7 +111,10 @@ func (d *mockAdapterStatusDao) Delete(ctx context.Context, id string) error { return nil } -func (d *mockAdapterStatusDao) FindByResource(ctx context.Context, resourceType, resourceID string) (api.AdapterStatusList, error) { +func (d *mockAdapterStatusDao) FindByResource( + ctx context.Context, + resourceType, resourceID string, +) (api.AdapterStatusList, error) { var result api.AdapterStatusList for _, s := range d.statuses { if s.ResourceType == resourceType && s.ResourceID == resourceID { @@ -117,12 +124,19 @@ func (d *mockAdapterStatusDao) FindByResource(ctx context.Context, resourceType, return result, nil } -func (d *mockAdapterStatusDao) FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, offset, limit int) (api.AdapterStatusList, int64, error) { +func (d *mockAdapterStatusDao) FindByResourcePaginated( + ctx context.Context, + resourceType, resourceID string, + offset, limit int, +) (api.AdapterStatusList, int64, error) { statuses, _ := d.FindByResource(ctx, resourceType, resourceID) return statuses, int64(len(statuses)), nil } -func (d *mockAdapterStatusDao) FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, error) { +func (d *mockAdapterStatusDao) FindByResourceAndAdapter( + ctx context.Context, + resourceType, resourceID, adapter string, +) (*api.AdapterStatus, error) { for _, s := range d.statuses { if s.ResourceType == resourceType && s.ResourceID == resourceID && s.Adapter == adapter { return s, nil @@ -152,12 +166,12 @@ func TestProcessAdapterStatus_UnknownCondition(t *testing.T) { service := NewClusterService(clusterDao, adapterStatusDao, config) ctx := context.Background() - clusterID := "test-cluster-id" + clusterID := testClusterID // Create adapter status with Available=Unknown conditions := []api.AdapterCondition{ { - Type: "Available", + Type: conditionTypeAvailable, Status: api.AdapterConditionUnknown, LastTransitionTime: time.Now(), }, @@ -192,7 +206,7 @@ func TestProcessAdapterStatus_TrueCondition(t *testing.T) { service := NewClusterService(clusterDao, adapterStatusDao, config) ctx := context.Background() - clusterID := "test-cluster-id" + clusterID := testClusterID // Create the cluster first cluster := &api.Cluster{ @@ -205,7 +219,7 @@ func TestProcessAdapterStatus_TrueCondition(t *testing.T) { // Create adapter status with Available=True conditions := []api.AdapterCondition{ { - Type: "Available", + Type: conditionTypeAvailable, Status: api.AdapterConditionTrue, LastTransitionTime: time.Now(), }, @@ -243,7 +257,7 @@ func TestProcessAdapterStatus_FalseCondition(t *testing.T) { service := NewClusterService(clusterDao, adapterStatusDao, config) 
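The surrounding test hunks replace repeated literals ("test-cluster-id", "Available", "Ready") with named constants, which is how this patch clears goconst findings without changing test behavior. A small, self-contained sketch of the same pattern follows; the describe helper is illustrative only and does not exist in the repository, and the constant values simply mirror the literals visible in these hunks.

package main

import "fmt"

// Repeated literals promoted to package-level constants; goconst typically
// flags a string once it appears three or more times in a package.
const (
	testClusterID          = "test-cluster-id"
	conditionTypeAvailable = "Available"
	conditionTypeReady     = "Ready"
)

// describe is a throwaway helper showing the constants used in a switch,
// the same shape the test hunks use when matching condition types.
func describe(conditionType string) string {
	switch conditionType {
	case conditionTypeAvailable:
		return "all required adapters reported Available=True"
	case conditionTypeReady:
		return "available at the current spec generation"
	default:
		return "unknown condition type"
	}
}

func main() {
	fmt.Println(testClusterID, "->", describe(conditionTypeAvailable))
	fmt.Println(testClusterID, "->", describe(conditionTypeReady))
}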
ctx := context.Background() - clusterID := "test-cluster-id" + clusterID := testClusterID // Create the cluster first cluster := &api.Cluster{ @@ -256,7 +270,7 @@ func TestProcessAdapterStatus_FalseCondition(t *testing.T) { // Create adapter status with Available=False conditions := []api.AdapterCondition{ { - Type: "Available", + Type: conditionTypeAvailable, Status: api.AdapterConditionFalse, LastTransitionTime: time.Now(), }, @@ -293,13 +307,13 @@ func TestProcessAdapterStatus_NoAvailableCondition(t *testing.T) { service := NewClusterService(clusterDao, adapterStatusDao, config) ctx := context.Background() - clusterID := "test-cluster-id" + clusterID := testClusterID // Create the cluster first fixedNow := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC) initialConditions := []api.ResourceCondition{ { - Type: "Available", + Type: conditionTypeAvailable, Status: api.ConditionFalse, ObservedGeneration: 1, LastTransitionTime: fixedNow, @@ -324,7 +338,8 @@ func TestProcessAdapterStatus_NoAvailableCondition(t *testing.T) { cluster.ID = clusterID _, svcErr := service.Create(ctx, cluster) Expect(svcErr).To(BeNil()) - initialClusterStatusConditions := append(api.Cluster{}.StatusConditions, cluster.StatusConditions...) + initialClusterStatusConditions := api.Cluster{}.StatusConditions + initialClusterStatusConditions = append(initialClusterStatusConditions, cluster.StatusConditions...) // Create adapter status with Health condition (no Available) conditions := []api.AdapterCondition{ @@ -356,7 +371,8 @@ func TestProcessAdapterStatus_NoAvailableCondition(t *testing.T) { // Verify that saving a non-Available condition did not overwrite cluster Available/Ready storedCluster, _ := clusterDao.Get(ctx, clusterID) - Expect(storedCluster.StatusConditions).To(Equal(initialClusterStatusConditions), "Cluster status conditions should not be overwritten when adapter status lacks Available") + Expect(storedCluster.StatusConditions).To(Equal(initialClusterStatusConditions), + "Cluster status conditions should not be overwritten when adapter status lacks Available") } // TestProcessAdapterStatus_MultipleConditions_AvailableUnknown tests multiple conditions with Available=Unknown @@ -370,7 +386,7 @@ func TestProcessAdapterStatus_MultipleConditions_AvailableUnknown(t *testing.T) service := NewClusterService(clusterDao, adapterStatusDao, config) ctx := context.Background() - clusterID := "test-cluster-id" + clusterID := testClusterID // Create adapter status with multiple conditions including Available=Unknown conditions := []api.AdapterCondition{ @@ -380,7 +396,7 @@ func TestProcessAdapterStatus_MultipleConditions_AvailableUnknown(t *testing.T) LastTransitionTime: time.Now(), }, { - Type: "Available", + Type: conditionTypeAvailable, Status: api.AdapterConditionUnknown, LastTransitionTime: time.Now(), }, @@ -422,7 +438,7 @@ func TestClusterAvailableReadyTransitions(t *testing.T) { service := NewClusterService(clusterDao, adapterStatusDao, adapterConfig) ctx := context.Background() - clusterID := "test-cluster-id" + clusterID := testClusterID cluster := &api.Cluster{Generation: 1} cluster.ID = clusterID @@ -440,9 +456,9 @@ func TestClusterAvailableReadyTransitions(t *testing.T) { var available, ready *api.ResourceCondition for i := range conds { switch conds[i].Type { - case "Available": + case conditionTypeAvailable: available = &conds[i] - case "Ready": + case conditionTypeReady: ready = &conds[i] } } @@ -453,7 +469,7 @@ func TestClusterAvailableReadyTransitions(t *testing.T) { upsert := func(adapter string, available 
api.AdapterConditionStatus, observedGen int32) { conditions := []api.AdapterCondition{ - {Type: "Available", Status: available, LastTransitionTime: time.Now()}, + {Type: conditionTypeAvailable, Status: available, LastTransitionTime: time.Now()}, } conditionsJSON, _ := json.Marshal(conditions) now := time.Now() @@ -516,8 +532,10 @@ func TestClusterAvailableReadyTransitions(t *testing.T) { // This is an edge case where an adapter reports a gen=1 status after a gen=2 status. // Since we don't allow downgrading observed generations, we should not overwrite the cluster conditions. // And Available should remain True, but in reality it should be False. - // This should be an unexpected edge case, since once a resource changes generation, all adapters should report a gen=2 status. - // So, while we are keeping Available True for gen=1, there should be soon an update to gen=2, which will overwrite the Available condition. + // This should be an unexpected edge case, since once a resource changes generation, + // all adapters should report a gen=2 status. + // So, while we are keeping Available True for gen=1, + // there should be soon an update to gen=2, which will overwrite the Available condition. upsert("validation", api.AdapterConditionFalse, 1) avail, ready = getSynth() Expect(avail.Status).To(Equal(api.ConditionTrue)) // <-- this is the edge case @@ -539,8 +557,11 @@ func TestClusterAvailableReadyTransitions(t *testing.T) { Expect(ready.Status).To(Equal(api.ConditionFalse)) // Available=Unknown is a no-op (does not store, does not overwrite cluster conditions). - prevStatus := append(api.Cluster{}.StatusConditions, clusterDao.clusters[clusterID].StatusConditions...) - unknownConds := []api.AdapterCondition{{Type: "Available", Status: api.AdapterConditionUnknown, LastTransitionTime: time.Now()}} + prevStatus := api.Cluster{}.StatusConditions + prevStatus = append(prevStatus, clusterDao.clusters[clusterID].StatusConditions...) 
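The two-step rewrite that closes the hunk above (seed prevStatus from the zero-value conditions slice, then append into it) is the usual way to satisfy gocritic's appendAssign check, which flags an append whose result is assigned to a variable other than the slice passed as its first argument. Below is a minimal, self-contained sketch of the before/after shape using a plain string slice instead of the api types; it assumes, as the patch appears to, that the zero-value conditions field is a nil slice, so the rewrite also yields an independent copy.

package main

import "fmt"

func main() {
	base := []string{"Available", "Ready"}

	// Flagged form: the append result is assigned to a new variable while the
	// first argument is a different slice expression.
	//   snapshot := append(someOtherZeroValueSlice, base...)

	// Rewritten form used in the patch: start from a zero-value slice owned by
	// the new variable, then append into that same variable. Appending to a
	// nil slice allocates a fresh backing array, so snapshot cannot alias base.
	var snapshot []string
	snapshot = append(snapshot, base...)

	base[0] = "Degraded"
	fmt.Println(snapshot[0]) // still "Available": snapshot is an independent copy
}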
+ unknownConds := []api.AdapterCondition{ + {Type: conditionTypeAvailable, Status: api.AdapterConditionUnknown, LastTransitionTime: time.Now()}, + } unknownJSON, _ := json.Marshal(unknownConds) unknownStatus := &api.AdapterStatus{ ResourceType: "Cluster", @@ -566,7 +587,7 @@ func TestClusterStaleAdapterStatusUpdatePolicy(t *testing.T) { service := NewClusterService(clusterDao, adapterStatusDao, adapterConfig) ctx := context.Background() - clusterID := "test-cluster-id" + clusterID := testClusterID cluster := &api.Cluster{Generation: 2} cluster.ID = clusterID @@ -580,7 +601,7 @@ func TestClusterStaleAdapterStatusUpdatePolicy(t *testing.T) { var conds []api.ResourceCondition Expect(json.Unmarshal(stored.StatusConditions, &conds)).To(Succeed()) for i := range conds { - if conds[i].Type == "Available" { + if conds[i].Type == conditionTypeAvailable { return conds[i] } } @@ -590,7 +611,7 @@ func TestClusterStaleAdapterStatusUpdatePolicy(t *testing.T) { upsert := func(adapter string, available api.AdapterConditionStatus, observedGen int32) { conditions := []api.AdapterCondition{ - {Type: "Available", Status: available, LastTransitionTime: time.Now()}, + {Type: conditionTypeAvailable, Status: available, LastTransitionTime: time.Now()}, } conditionsJSON, _ := json.Marshal(conditions) now := time.Now() @@ -641,12 +662,12 @@ func TestClusterSyntheticTimestampsStableWithoutAdapterStatus(t *testing.T) { service := NewClusterService(clusterDao, adapterStatusDao, adapterConfig) ctx := context.Background() - clusterID := "test-cluster-id" + clusterID := testClusterID fixedNow := time.Date(2026, 1, 1, 12, 0, 0, 0, time.UTC) initialConditions := []api.ResourceCondition{ { - Type: "Available", + Type: conditionTypeAvailable, Status: api.ConditionFalse, ObservedGeneration: 1, LastTransitionTime: fixedNow, @@ -679,9 +700,9 @@ func TestClusterSyntheticTimestampsStableWithoutAdapterStatus(t *testing.T) { var createdAvailable, createdReady *api.ResourceCondition for i := range createdConds { switch createdConds[i].Type { - case "Available": + case conditionTypeAvailable: createdAvailable = &createdConds[i] - case "Ready": + case conditionTypeReady: createdReady = &createdConds[i] } } @@ -704,9 +725,9 @@ func TestClusterSyntheticTimestampsStableWithoutAdapterStatus(t *testing.T) { var updatedAvailable, updatedReady *api.ResourceCondition for i := range updatedConds { switch updatedConds[i].Type { - case "Available": + case conditionTypeAvailable: updatedAvailable = &updatedConds[i] - case "Ready": + case conditionTypeReady: updatedReady = &updatedConds[i] } } diff --git a/pkg/services/generic.go b/pkg/services/generic.go index 84d59a0..6a2f4c0 100755 --- a/pkg/services/generic.go +++ b/pkg/services/generic.go @@ -24,7 +24,9 @@ import ( //go:generate mockgen-v0.6.0 -source=generic.go -package=services -destination=generic_mock.go type GenericService interface { - List(ctx context.Context, username string, args *ListArguments, resourceList interface{}) (*api.PagingMeta, *errors.ServiceError) + List( + ctx context.Context, username string, args *ListArguments, resourceList interface{}, + ) (*api.PagingMeta, *errors.ServiceError) } func NewGenericService(genericDao dao.GenericDao) GenericService { @@ -63,7 +65,9 @@ type listContext struct { set map[string]bool } -func (s *sqlGenericService) newListContext(ctx context.Context, username string, args *ListArguments, resourceList interface{}) (*listContext, interface{}, *errors.ServiceError) { +func (s *sqlGenericService) newListContext( + ctx context.Context, username 
string, args *ListArguments, resourceList interface{}, +) (*listContext, interface{}, *errors.ServiceError) { resourceModel := reflect.TypeOf(resourceList).Elem().Elem() resourceTypeStr := resourceModel.Name() if resourceTypeStr == "" { @@ -86,7 +90,9 @@ func (s *sqlGenericService) newListContext(ctx context.Context, username string, } // List resourceList must be a pointer to a slice of database resource objects -func (s *sqlGenericService) List(ctx context.Context, username string, args *ListArguments, resourceList interface{}) (*api.PagingMeta, *errors.ServiceError) { +func (s *sqlGenericService) List( + ctx context.Context, username string, args *ListArguments, resourceList interface{}, +) (*api.PagingMeta, *errors.ServiceError) { listCtx, model, err := s.newListContext(ctx, username, args, resourceList) if err != nil { return nil, err @@ -154,7 +160,9 @@ func (s *sqlGenericService) buildOrderBy(listCtx *listContext, d *dao.GenericDao return false, nil } -func (s *sqlGenericService) buildSearchValues(listCtx *listContext, d *dao.GenericDao) (string, []any, *errors.ServiceError) { +func (s *sqlGenericService) buildSearchValues( + listCtx *listContext, d *dao.GenericDao, +) (string, []any, *errors.ServiceError) { if listCtx.args.Search == "" { s.addJoins(listCtx, d) return "", nil, nil @@ -276,7 +284,8 @@ func (s *sqlGenericService) loadList(listCtx *listContext, d *dao.GenericDao) *e case args.Size == 0: // This early return is not only performant, but also necessary. // gorm does not support Limit(0) any longer. - logger.Info(listCtx.ctx, "A query with 0 size requested, returning early without collecting any resources from database") + logger.Info(listCtx.ctx, + "A query with 0 size requested, returning early without collecting any resources from database") return nil } @@ -312,7 +321,9 @@ func zeroSlice(i interface{}, cap int64) *errors.ServiceError { // walk the TSL tree looking for fields like, e.g., creator.username, and then: // (1) look up the related table by its 1st part - creator // (2) replace it by table name - creator.username -> accounts.username -func (s *sqlGenericService) treeWalkForRelatedTables(listCtx *listContext, tslTree tsl.Node, genericDao *dao.GenericDao) (tsl.Node, *errors.ServiceError) { +func (s *sqlGenericService) treeWalkForRelatedTables( + listCtx *listContext, tslTree tsl.Node, genericDao *dao.GenericDao, +) (tsl.Node, *errors.ServiceError) { resourceTable := (*genericDao).GetTableName() if listCtx.joins == nil { listCtx.joins = map[string]dao.TableRelation{} @@ -326,10 +337,13 @@ func (s *sqlGenericService) treeWalkForRelatedTables(listCtx *listContext, tslTr if relation, ok := (*genericDao).GetTableRelation(fieldName); ok { listCtx.joins[fieldName] = relation } else { - return field, fmt.Errorf("%s is not a related resource of %s", fieldName, listCtx.resourceType) + return field, fmt.Errorf( + "%s is not a related resource of %s", + fieldName, listCtx.resourceType, + ) } } - //replace by table name + // replace by table name fieldParts[0] = listCtx.joins[fieldName].ForeignTableName return strings.Join(fieldParts, "."), nil } @@ -345,7 +359,11 @@ func (s *sqlGenericService) treeWalkForRelatedTables(listCtx *listContext, tslTr } // prepend table name to these "free" identifiers since they could cause "ambiguous" errors -func (s *sqlGenericService) treeWalkForAddingTableName(listCtx *listContext, tslTree tsl.Node, dao *dao.GenericDao) (tsl.Node, *errors.ServiceError) { +func (s *sqlGenericService) treeWalkForAddingTableName( + _ *listContext, + tslTree 
tsl.Node, + dao *dao.GenericDao, +) (tsl.Node, *errors.ServiceError) { resourceTable := (*dao).GetTableName() walkFn := func(field string) (string, error) { @@ -367,7 +385,10 @@ func (s *sqlGenericService) treeWalkForAddingTableName(listCtx *listContext, tsl return tslTree, nil } -func (s *sqlGenericService) treeWalkForSqlizer(listCtx *listContext, tslTree tsl.Node) (squirrel.Sqlizer, *errors.ServiceError) { +func (s *sqlGenericService) treeWalkForSqlizer( + _ *listContext, + tslTree tsl.Node, +) (squirrel.Sqlizer, *errors.ServiceError) { // Note: FieldNameWalk is now called earlier in buildSearchValues to ensure field mapping // happens before related table detection. No need to call it again here. diff --git a/pkg/services/generic_test.go b/pkg/services/generic_test.go index dc570b6..bddf025 100755 --- a/pkg/services/generic_test.go +++ b/pkg/services/generic_test.go @@ -40,7 +40,9 @@ func TestSQLTranslation(t *testing.T) { var list []api.Cluster search := test["search"].(string) errorMsg := test["error"].(string) - listCtx, model, serviceErr := genericService.newListContext(context.Background(), "", &ListArguments{Search: search}, &list) + listCtx, model, serviceErr := genericService.newListContext( + context.Background(), "", &ListArguments{Search: search}, &list, + ) Expect(serviceErr).ToNot(HaveOccurred()) d := g.GetInstanceDao(context.Background(), model) _, serviceErr = genericService.buildSearch(listCtx, &d) @@ -75,7 +77,9 @@ func TestSQLTranslation(t *testing.T) { search := test["search"].(string) sqlReal := test["sql"].(string) valuesReal := test["values"].(types.GomegaMatcher) - listCtx, _, serviceErr := genericService.newListContext(context.Background(), "", &ListArguments{Search: search}, &list) + listCtx, _, serviceErr := genericService.newListContext( + context.Background(), "", &ListArguments{Search: search}, &list, + ) Expect(serviceErr).ToNot(HaveOccurred()) tslTree, err := tsl.ParseTSL(search) Expect(err).ToNot(HaveOccurred()) diff --git a/pkg/services/node_pool.go b/pkg/services/node_pool.go index 8309c1e..30dee7a 100644 --- a/pkg/services/node_pool.go +++ b/pkg/services/node_pool.go @@ -32,14 +32,20 @@ type NodePoolService interface { // ProcessAdapterStatus handles the business logic for adapter status: // - If Available condition is "Unknown": returns (nil, nil) indicating no-op // - Otherwise: upserts the status and triggers aggregation - ProcessAdapterStatus(ctx context.Context, nodePoolID string, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) + ProcessAdapterStatus( + ctx context.Context, nodePoolID string, adapterStatus *api.AdapterStatus, + ) (*api.AdapterStatus, *errors.ServiceError) // idempotent functions for the control plane, but can also be called synchronously by any actor OnUpsert(ctx context.Context, id string) error OnDelete(ctx context.Context, id string) error } -func NewNodePoolService(nodePoolDao dao.NodePoolDao, adapterStatusDao dao.AdapterStatusDao, adapterConfig *config.AdapterRequirementsConfig) NodePoolService { +func NewNodePoolService( + nodePoolDao dao.NodePoolDao, + adapterStatusDao dao.AdapterStatusDao, + adapterConfig *config.AdapterRequirementsConfig, +) NodePoolService { return &sqlNodePoolService{ nodePoolDao: nodePoolDao, adapterStatusDao: adapterStatusDao, @@ -82,7 +88,9 @@ func (s *sqlNodePoolService) Create(ctx context.Context, nodePool *api.NodePool) return updatedNodePool, nil } -func (s *sqlNodePoolService) Replace(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, *errors.ServiceError) 
{ +func (s *sqlNodePoolService) Replace( + ctx context.Context, nodePool *api.NodePool, +) (*api.NodePool, *errors.ServiceError) { nodePool, err := s.nodePoolDao.Replace(ctx, nodePool) if err != nil { return nil, handleUpdateError("NodePool", err) @@ -123,7 +131,8 @@ func (s *sqlNodePoolService) OnUpsert(ctx context.Context, id string) error { return err } - logger.With(ctx, logger.FieldNodePoolID, nodePool.ID).Info("Perform idempotent operations on node pool") + logger.With(ctx, logger.FieldNodePoolID, nodePool.ID). + Info("Perform idempotent operations on node pool") return nil } @@ -134,7 +143,9 @@ func (s *sqlNodePoolService) OnDelete(ctx context.Context, id string) error { } // UpdateNodePoolStatusFromAdapters aggregates adapter statuses into nodepool status -func (s *sqlNodePoolService) UpdateNodePoolStatusFromAdapters(ctx context.Context, nodePoolID string) (*api.NodePool, *errors.ServiceError) { +func (s *sqlNodePoolService) UpdateNodePoolStatusFromAdapters( + ctx context.Context, nodePoolID string, +) (*api.NodePool, *errors.ServiceError) { // Get the nodepool nodePool, err := s.nodePoolDao.Get(ctx, nodePoolID) if err != nil { @@ -162,7 +173,7 @@ func (s *sqlNodePoolService) UpdateNodePoolStatusFromAdapters(ctx context.Contex // Find the "Available" condition var availableCondition *api.AdapterCondition for i := range conditions { - if conditions[i].Type == "Available" { + if conditions[i].Type == conditionTypeAvailable { availableCondition = &conditions[i] break } @@ -229,8 +240,12 @@ func (s *sqlNodePoolService) UpdateNodePoolStatusFromAdapters(ctx context.Contex // ProcessAdapterStatus handles the business logic for adapter status: // - If Available condition is "Unknown": returns (nil, nil) indicating no-op // - Otherwise: upserts the status and triggers aggregation -func (s *sqlNodePoolService) ProcessAdapterStatus(ctx context.Context, nodePoolID string, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) { - existingStatus, findErr := s.adapterStatusDao.FindByResourceAndAdapter(ctx, "NodePool", nodePoolID, adapterStatus.Adapter) +func (s *sqlNodePoolService) ProcessAdapterStatus( + ctx context.Context, nodePoolID string, adapterStatus *api.AdapterStatus, +) (*api.AdapterStatus, *errors.ServiceError) { + existingStatus, findErr := s.adapterStatusDao.FindByResourceAndAdapter( + ctx, "NodePool", nodePoolID, adapterStatus.Adapter, + ) if findErr != nil && !stderrors.Is(findErr, gorm.ErrRecordNotFound) { if !strings.Contains(findErr.Error(), errors.CodeNotFoundGeneric) { return nil, errors.GeneralError("Failed to get adapter status: %s", findErr) @@ -252,7 +267,7 @@ func (s *sqlNodePoolService) ProcessAdapterStatus(ctx context.Context, nodePoolI // Find the "Available" condition hasAvailableCondition := false for _, cond := range conditions { - if cond.Type != "Available" { + if cond.Type != conditionTypeAvailable { continue } @@ -273,9 +288,12 @@ func (s *sqlNodePoolService) ProcessAdapterStatus(ctx context.Context, nodePoolI // If the adapter status doesn't include Available, saving it should not overwrite // the nodepool's synthetic Available/Ready conditions. 
if hasAvailableCondition { - if _, aggregateErr := s.UpdateNodePoolStatusFromAdapters(ctx, nodePoolID); aggregateErr != nil { + if _, aggregateErr := s.UpdateNodePoolStatusFromAdapters( + ctx, nodePoolID, + ); aggregateErr != nil { // Log error but don't fail the request - the status will be computed on next update - logger.With(ctx, logger.FieldNodePoolID, nodePoolID).WithError(aggregateErr).Warn("Failed to aggregate nodepool status") + logger.With(ctx, logger.FieldNodePoolID, nodePoolID). + WithError(aggregateErr).Warn("Failed to aggregate nodepool status") } } diff --git a/pkg/services/node_pool_test.go b/pkg/services/node_pool_test.go index 21d6e6c..6fd8b51 100644 --- a/pkg/services/node_pool_test.go +++ b/pkg/services/node_pool_test.go @@ -14,6 +14,10 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" ) +const ( + testNodePoolID = "test-nodepool-id" +) + // Mock implementations for testing NodePool ProcessAdapterStatus type mockNodePoolDao struct { @@ -79,12 +83,12 @@ func TestNodePoolProcessAdapterStatus_UnknownCondition(t *testing.T) { service := NewNodePoolService(nodePoolDao, adapterStatusDao, config) ctx := context.Background() - nodePoolID := "test-nodepool-id" + nodePoolID := testNodePoolID // Create adapter status with Available=Unknown conditions := []api.AdapterCondition{ { - Type: "Available", + Type: conditionTypeAvailable, Status: api.AdapterConditionUnknown, LastTransitionTime: time.Now(), }, @@ -119,7 +123,7 @@ func TestNodePoolProcessAdapterStatus_TrueCondition(t *testing.T) { service := NewNodePoolService(nodePoolDao, adapterStatusDao, config) ctx := context.Background() - nodePoolID := "test-nodepool-id" + nodePoolID := testNodePoolID // Create the nodepool first nodePool := &api.NodePool{ @@ -132,7 +136,7 @@ func TestNodePoolProcessAdapterStatus_TrueCondition(t *testing.T) { // Create adapter status with Available=True conditions := []api.AdapterCondition{ { - Type: "Available", + Type: conditionTypeAvailable, Status: api.AdapterConditionTrue, LastTransitionTime: time.Now(), }, @@ -170,17 +174,17 @@ func TestNodePoolProcessAdapterStatus_MultipleConditions_AvailableUnknown(t *tes service := NewNodePoolService(nodePoolDao, adapterStatusDao, config) ctx := context.Background() - nodePoolID := "test-nodepool-id" + nodePoolID := testNodePoolID // Create adapter status with multiple conditions including Available=Unknown conditions := []api.AdapterCondition{ { - Type: "Ready", + Type: conditionTypeReady, Status: api.AdapterConditionTrue, LastTransitionTime: time.Now(), }, { - Type: "Available", + Type: conditionTypeAvailable, Status: api.AdapterConditionUnknown, LastTransitionTime: time.Now(), }, @@ -216,7 +220,7 @@ func TestNodePoolAvailableReadyTransitions(t *testing.T) { service := NewNodePoolService(nodePoolDao, adapterStatusDao, adapterConfig) ctx := context.Background() - nodePoolID := "test-nodepool-id" + nodePoolID := testNodePoolID nodePool := &api.NodePool{Generation: 1} nodePool.ID = nodePoolID @@ -234,9 +238,9 @@ func TestNodePoolAvailableReadyTransitions(t *testing.T) { var available, ready *api.ResourceCondition for i := range conds { switch conds[i].Type { - case "Available": + case conditionTypeAvailable: available = &conds[i] - case "Ready": + case conditionTypeReady: ready = &conds[i] } } @@ -247,7 +251,7 @@ func TestNodePoolAvailableReadyTransitions(t *testing.T) { upsert := func(adapter string, available api.AdapterConditionStatus, observedGen int32) { conditions := []api.AdapterCondition{ - {Type: "Available", Status: available, 
LastTransitionTime: time.Now()}, + {Type: conditionTypeAvailable, Status: available, LastTransitionTime: time.Now()}, } conditionsJSON, _ := json.Marshal(conditions) now := time.Now() @@ -320,8 +324,11 @@ func TestNodePoolAvailableReadyTransitions(t *testing.T) { Expect(ready.Status).To(Equal(api.ConditionFalse)) // Adapter status with no Available condition should not overwrite synthetic conditions. - prevStatus := append(api.NodePool{}.StatusConditions, nodePoolDao.nodePools[nodePoolID].StatusConditions...) - nonAvailableConds := []api.AdapterCondition{{Type: "Health", Status: api.AdapterConditionTrue, LastTransitionTime: time.Now()}} + prevStatus := api.NodePool{}.StatusConditions + prevStatus = append(prevStatus, nodePoolDao.nodePools[nodePoolID].StatusConditions...) + nonAvailableConds := []api.AdapterCondition{ + {Type: "Health", Status: api.AdapterConditionTrue, LastTransitionTime: time.Now()}, + } nonAvailableJSON, _ := json.Marshal(nonAvailableConds) nonAvailableStatus := &api.AdapterStatus{ ResourceType: "NodePool", @@ -336,8 +343,11 @@ func TestNodePoolAvailableReadyTransitions(t *testing.T) { Expect(nodePoolDao.nodePools[nodePoolID].StatusConditions).To(Equal(prevStatus)) // Available=Unknown is a no-op (does not store, does not overwrite nodepool conditions). - prevStatus = append(api.NodePool{}.StatusConditions, nodePoolDao.nodePools[nodePoolID].StatusConditions...) - unknownConds := []api.AdapterCondition{{Type: "Available", Status: api.AdapterConditionUnknown, LastTransitionTime: time.Now()}} + prevStatus = api.NodePool{}.StatusConditions + prevStatus = append(prevStatus, nodePoolDao.nodePools[nodePoolID].StatusConditions...) + unknownConds := []api.AdapterCondition{ + {Type: conditionTypeAvailable, Status: api.AdapterConditionUnknown, LastTransitionTime: time.Now()}, + } unknownJSON, _ := json.Marshal(unknownConds) unknownStatus := &api.AdapterStatus{ ResourceType: "NodePool", @@ -363,7 +373,7 @@ func TestNodePoolStaleAdapterStatusUpdatePolicy(t *testing.T) { service := NewNodePoolService(nodePoolDao, adapterStatusDao, adapterConfig) ctx := context.Background() - nodePoolID := "test-nodepool-id" + nodePoolID := testNodePoolID nodePool := &api.NodePool{Generation: 2} nodePool.ID = nodePoolID @@ -377,7 +387,7 @@ func TestNodePoolStaleAdapterStatusUpdatePolicy(t *testing.T) { var conds []api.ResourceCondition Expect(json.Unmarshal(stored.StatusConditions, &conds)).To(Succeed()) for i := range conds { - if conds[i].Type == "Available" { + if conds[i].Type == conditionTypeAvailable { return conds[i] } } @@ -387,7 +397,7 @@ func TestNodePoolStaleAdapterStatusUpdatePolicy(t *testing.T) { upsert := func(adapter string, available api.AdapterConditionStatus, observedGen int32) { conditions := []api.AdapterCondition{ - {Type: "Available", Status: available, LastTransitionTime: time.Now()}, + {Type: conditionTypeAvailable, Status: available, LastTransitionTime: time.Now()}, } conditionsJSON, _ := json.Marshal(conditions) now := time.Now() @@ -438,12 +448,12 @@ func TestNodePoolSyntheticTimestampsStableWithoutAdapterStatus(t *testing.T) { service := NewNodePoolService(nodePoolDao, adapterStatusDao, adapterConfig) ctx := context.Background() - nodePoolID := "test-nodepool-id" + nodePoolID := testNodePoolID fixedNow := time.Date(2026, 1, 1, 12, 0, 0, 0, time.UTC) initialConditions := []api.ResourceCondition{ { - Type: "Available", + Type: conditionTypeAvailable, Status: api.ConditionFalse, ObservedGeneration: 1, LastTransitionTime: fixedNow, @@ -451,7 +461,7 @@ func 
TestNodePoolSyntheticTimestampsStableWithoutAdapterStatus(t *testing.T) { LastUpdatedTime: fixedNow, }, { - Type: "Ready", + Type: conditionTypeReady, Status: api.ConditionFalse, ObservedGeneration: 1, LastTransitionTime: fixedNow, @@ -476,9 +486,9 @@ func TestNodePoolSyntheticTimestampsStableWithoutAdapterStatus(t *testing.T) { var createdAvailable, createdReady *api.ResourceCondition for i := range createdConds { switch createdConds[i].Type { - case "Available": + case conditionTypeAvailable: createdAvailable = &createdConds[i] - case "Ready": + case conditionTypeReady: createdReady = &createdConds[i] } } @@ -501,9 +511,9 @@ func TestNodePoolSyntheticTimestampsStableWithoutAdapterStatus(t *testing.T) { var updatedAvailable, updatedReady *api.ResourceCondition for i := range updatedConds { switch updatedConds[i].Type { - case "Available": + case conditionTypeAvailable: updatedAvailable = &updatedConds[i] - case "Ready": + case conditionTypeReady: updatedReady = &updatedConds[i] } } diff --git a/pkg/services/status_aggregation.go b/pkg/services/status_aggregation.go index c2fc659..b1a155b 100644 --- a/pkg/services/status_aggregation.go +++ b/pkg/services/status_aggregation.go @@ -10,6 +10,11 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" ) +const ( + conditionTypeAvailable = "Available" + conditionTypeReady = "Ready" +) + // Required adapter lists configured via pkg/config/adapter.go (see AdapterRequirementsConfig) // adapterConditionSuffixMap allows overriding the default suffix for specific adapters @@ -82,7 +87,7 @@ func ComputeAvailableCondition(adapterStatuses api.AdapterStatusList, requiredAd if len(adapterStatus.Conditions) > 0 { if err := json.Unmarshal(adapterStatus.Conditions, &conditions); err == nil { for _, cond := range conditions { - if cond.Type == "Available" { + if cond.Type == conditionTypeAvailable { adapterMap[adapterStatus.Adapter] = struct { available string observedGeneration int32 @@ -131,7 +136,9 @@ func ComputeAvailableCondition(adapterStatuses api.AdapterStatusList, requiredAd // ComputeReadyCondition checks if all required adapters have Available=True at the CURRENT generation. // "Ready" means the system is running at the latest spec generation. 
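The status_aggregation.go hunks above spell out the aggregation rule in their comments: Available means every required adapter reports an Available=True condition (whatever generation it observed), while Ready additionally requires that each report is for the current spec generation. A rough, self-contained sketch of that rule with simplified stand-in types follows; the exact generation comparison and the stale-status edge cases exercised by the tests earlier in this patch are deliberately simplified here.

package main

import "fmt"

// adapterReport is a simplified stand-in for an adapter status row: the
// adapter name, whether its Available condition is "True", and the spec
// generation it observed.
type adapterReport struct {
	Adapter            string
	Available          bool
	ObservedGeneration int32
}

// computeAvailable: every required adapter reports Available=True, at any generation.
func computeAvailable(reports []adapterReport, required []string) bool {
	return allRequired(reports, required, func(r adapterReport) bool { return r.Available })
}

// computeReady: every required adapter reports Available=True at the current
// generation (simplification: current-or-newer is treated as current).
func computeReady(reports []adapterReport, required []string, generation int32) bool {
	return allRequired(reports, required, func(r adapterReport) bool {
		return r.Available && r.ObservedGeneration >= generation
	})
}

// allRequired returns true only when every required adapter has a report and
// that report satisfies the predicate.
func allRequired(reports []adapterReport, required []string, ok func(adapterReport) bool) bool {
	if len(required) == 0 {
		return false
	}
	byAdapter := make(map[string]adapterReport, len(reports))
	for _, r := range reports {
		byAdapter[r.Adapter] = r
	}
	for _, name := range required {
		r, found := byAdapter[name]
		if !found || !ok(r) {
			return false
		}
	}
	return true
}

func main() {
	reports := []adapterReport{
		{Adapter: "validation", Available: true, ObservedGeneration: 1},
		{Adapter: "provisioning", Available: true, ObservedGeneration: 2},
	}
	required := []string{"validation", "provisioning"}
	fmt.Println(computeAvailable(reports, required)) // true: both report Available=True
	fmt.Println(computeReady(reports, required, 2))  // false: validation still observes generation 1
}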
-func ComputeReadyCondition(adapterStatuses api.AdapterStatusList, requiredAdapters []string, resourceGeneration int32) bool { +func ComputeReadyCondition( + adapterStatuses api.AdapterStatusList, requiredAdapters []string, resourceGeneration int32, +) bool { if len(adapterStatuses) == 0 || len(requiredAdapters) == 0 { return false } @@ -151,7 +158,7 @@ func ComputeReadyCondition(adapterStatuses api.AdapterStatusList, requiredAdapte if len(adapterStatus.Conditions) > 0 { if err := json.Unmarshal(adapterStatus.Conditions, &conditions); err == nil { for _, cond := range conditions { - if cond.Type == "Available" { + if cond.Type == conditionTypeAvailable { adapterMap[adapterStatus.Adapter] = struct { available string observedGeneration int32 @@ -194,7 +201,13 @@ func ComputeReadyCondition(adapterStatuses api.AdapterStatusList, requiredAdapte return numReady == numRequired } -func BuildSyntheticConditions(existingConditionsJSON []byte, adapterStatuses api.AdapterStatusList, requiredAdapters []string, resourceGeneration int32, now time.Time) (api.ResourceCondition, api.ResourceCondition) { +func BuildSyntheticConditions( + existingConditionsJSON []byte, + adapterStatuses api.AdapterStatusList, + requiredAdapters []string, + resourceGeneration int32, + now time.Time, +) (api.ResourceCondition, api.ResourceCondition) { var existingAvailable *api.ResourceCondition var existingReady *api.ResourceCondition @@ -203,9 +216,9 @@ func BuildSyntheticConditions(existingConditionsJSON []byte, adapterStatuses api if err := json.Unmarshal(existingConditionsJSON, &existingConditions); err == nil { for i := range existingConditions { switch existingConditions[i].Type { - case "Available": + case conditionTypeAvailable: existingAvailable = &existingConditions[i] - case "Ready": + case conditionTypeReady: existingReady = &existingConditions[i] } } @@ -218,7 +231,7 @@ func BuildSyntheticConditions(existingConditionsJSON []byte, adapterStatuses api availableStatus = api.ConditionTrue } availableCondition := api.ResourceCondition{ - Type: "Available", + Type: conditionTypeAvailable, Status: availableStatus, ObservedGeneration: minObservedGeneration, LastTransitionTime: now, @@ -233,7 +246,7 @@ func BuildSyntheticConditions(existingConditionsJSON []byte, adapterStatuses api readyStatus = api.ConditionTrue } readyCondition := api.ResourceCondition{ - Type: "Ready", + Type: conditionTypeReady, Status: readyStatus, ObservedGeneration: resourceGeneration, LastTransitionTime: now, diff --git a/pkg/telemetry/otel.go b/pkg/telemetry/otel.go index 34f752f..73a8b28 100644 --- a/pkg/telemetry/otel.go +++ b/pkg/telemetry/otel.go @@ -15,7 +15,9 @@ import ( // InitTraceProvider initializes OpenTelemetry trace provider // Uses stdout exporter (traces output to logs, no external Collector needed) // Future upgrade: Switch to OTLP HTTP exporter by changing only the exporter creation -func InitTraceProvider(ctx context.Context, serviceName, serviceVersion string, samplingRate float64) (*trace.TracerProvider, error) { +func InitTraceProvider( + ctx context.Context, serviceName, serviceVersion string, samplingRate float64, +) (*trace.TracerProvider, error) { // Create stdout exporter exporter, err := stdouttrace.New( stdouttrace.WithPrettyPrint(), // Formatted output @@ -42,11 +44,12 @@ func InitTraceProvider(ctx context.Context, serviceName, serviceVersion string, // Determine sampler based on sampling rate var sampler trace.Sampler - if samplingRate >= 1.0 { + switch { + case samplingRate >= 1.0: sampler = trace.AlwaysSample() // 
Sample all - } else if samplingRate <= 0.0 { + case samplingRate <= 0.0: sampler = trace.NeverSample() // Sample none - } else { + default: sampler = trace.TraceIDRatioBased(samplingRate) } diff --git a/pkg/validators/schema_validator.go b/pkg/validators/schema_validator.go index 1180030..3226e5d 100644 --- a/pkg/validators/schema_validator.go +++ b/pkg/validators/schema_validator.go @@ -78,19 +78,23 @@ func (v *SchemaValidator) Validate(resourceType string, spec map[string]interfac } // ValidateClusterSpec validates a cluster spec against the ClusterSpec schema +// // Deprecated: Use Validate("cluster", spec) instead func (v *SchemaValidator) ValidateClusterSpec(spec map[string]interface{}) error { return v.Validate("cluster", spec) } // ValidateNodePoolSpec validates a nodepool spec against the NodePoolSpec schema +// // Deprecated: Use Validate("nodepool", spec) instead func (v *SchemaValidator) ValidateNodePoolSpec(spec map[string]interface{}) error { return v.Validate("nodepool", spec) } // validateSpec performs the actual validation and converts errors to our error format -func (v *SchemaValidator) validateSpec(spec map[string]interface{}, schemaRef *openapi3.SchemaRef, specTypeName string) error { +func (v *SchemaValidator) validateSpec( + spec map[string]interface{}, schemaRef *openapi3.SchemaRef, specTypeName string, +) error { // Cast spec to interface{} for VisitJSON var specData interface{} = spec diff --git a/pkg/validators/schema_validator_test.go b/pkg/validators/schema_validator_test.go index 7f48624..07ca30c 100644 --- a/pkg/validators/schema_validator_test.go +++ b/pkg/validators/schema_validator_test.go @@ -59,7 +59,7 @@ func TestNewSchemaValidator(t *testing.T) { // Create temporary schema file tmpDir := t.TempDir() schemaPath := filepath.Join(tmpDir, "test-schema.yaml") - err := os.WriteFile(schemaPath, []byte(testSchema), 0644) + err := os.WriteFile(schemaPath, []byte(testSchema), 0600) Expect(err).To(BeNil()) // Test successful schema loading @@ -103,7 +103,7 @@ components: tmpDir := t.TempDir() schemaPath := filepath.Join(tmpDir, "invalid-schema.yaml") - err := os.WriteFile(schemaPath, []byte(invalidSchema), 0644) + err := os.WriteFile(schemaPath, []byte(invalidSchema), 0600) Expect(err).To(BeNil()) // Should fail because ClusterSpec is missing @@ -329,7 +329,7 @@ func TestValidateNodePoolSpec_EmptyMachineType(t *testing.T) { func setupTestValidator(t *testing.T) *SchemaValidator { tmpDir := t.TempDir() schemaPath := filepath.Join(tmpDir, "test-schema.yaml") - err := os.WriteFile(schemaPath, []byte(testSchema), 0644) + err := os.WriteFile(schemaPath, []byte(testSchema), 0600) if err != nil { t.Fatalf("Failed to create test schema: %v", err) } diff --git a/test/factories/clusters.go b/test/factories/clusters.go index 583ccc9..da69822 100644 --- a/test/factories/clusters.go +++ b/test/factories/clusters.go @@ -62,7 +62,9 @@ func reloadCluster(dbSession *gorm.DB, cluster *api.Cluster) error { // NewClusterWithStatus creates a cluster with specific status conditions // dbFactory parameter is needed to update database fields // The isAvailable and isReady parameters control which synthetic conditions are set -func NewClusterWithStatus(f *Factories, dbFactory db.SessionFactory, id string, isAvailable, isReady bool) (*api.Cluster, error) { +func NewClusterWithStatus( + f *Factories, dbFactory db.SessionFactory, id string, isAvailable, isReady bool, +) (*api.Cluster, error) { cluster, err := f.NewCluster(id) if err != nil { return nil, err @@ -117,7 +119,9 @@ func 
NewClusterWithStatus(f *Factories, dbFactory db.SessionFactory, id string, } // NewClusterWithLabels creates a cluster with specific labels -func NewClusterWithLabels(f *Factories, dbFactory db.SessionFactory, id string, labels map[string]string) (*api.Cluster, error) { +func NewClusterWithLabels( + f *Factories, dbFactory db.SessionFactory, id string, labels map[string]string, +) (*api.Cluster, error) { cluster, err := f.NewCluster(id) if err != nil { return nil, err @@ -143,7 +147,9 @@ func NewClusterWithLabels(f *Factories, dbFactory db.SessionFactory, id string, } // NewClusterWithStatusAndLabels creates a cluster with both status conditions and labels -func NewClusterWithStatusAndLabels(f *Factories, dbFactory db.SessionFactory, id string, isAvailable, isReady bool, labels map[string]string) (*api.Cluster, error) { +func NewClusterWithStatusAndLabels( + f *Factories, dbFactory db.SessionFactory, id string, isAvailable, isReady bool, labels map[string]string, +) (*api.Cluster, error) { cluster, err := NewClusterWithStatus(f, dbFactory, id, isAvailable, isReady) if err != nil { return nil, err diff --git a/test/factories/node_pools.go b/test/factories/node_pools.go index 990de22..1ad9076 100644 --- a/test/factories/node_pools.go +++ b/test/factories/node_pools.go @@ -82,7 +82,9 @@ func reloadNodePool(dbSession *gorm.DB, nodePool *api.NodePool) error { // NewNodePoolWithStatus creates a node pool with specific status conditions // dbFactory parameter is needed to update database fields // The isAvailable and isReady parameters control which synthetic conditions are set -func NewNodePoolWithStatus(f *Factories, dbFactory db.SessionFactory, id string, isAvailable, isReady bool) (*api.NodePool, error) { +func NewNodePoolWithStatus( + f *Factories, dbFactory db.SessionFactory, id string, isAvailable, isReady bool, +) (*api.NodePool, error) { nodePool, err := f.NewNodePool(id) if err != nil { return nil, err @@ -137,7 +139,9 @@ func NewNodePoolWithStatus(f *Factories, dbFactory db.SessionFactory, id string, } // NewNodePoolWithLabels creates a node pool with specific labels -func NewNodePoolWithLabels(f *Factories, dbFactory db.SessionFactory, id string, labels map[string]string) (*api.NodePool, error) { +func NewNodePoolWithLabels( + f *Factories, dbFactory db.SessionFactory, id string, labels map[string]string, +) (*api.NodePool, error) { nodePool, err := f.NewNodePool(id) if err != nil { return nil, err @@ -163,7 +167,9 @@ func NewNodePoolWithLabels(f *Factories, dbFactory db.SessionFactory, id string, } // NewNodePoolWithStatusAndLabels creates a node pool with both status conditions and labels -func NewNodePoolWithStatusAndLabels(f *Factories, dbFactory db.SessionFactory, id string, isAvailable, isReady bool, labels map[string]string) (*api.NodePool, error) { +func NewNodePoolWithStatusAndLabels( + f *Factories, dbFactory db.SessionFactory, id string, isAvailable, isReady bool, labels map[string]string, +) (*api.NodePool, error) { nodePool, err := NewNodePoolWithStatus(f, dbFactory, id, isAvailable, isReady) if err != nil { return nil, err diff --git a/test/integration/adapter_status_test.go b/test/integration/adapter_status_test.go index dff37a7..6b2a114 100644 --- a/test/integration/adapter_status_test.go +++ b/test/integration/adapter_status_test.go @@ -14,7 +14,9 @@ import ( ) // Helper to create AdapterStatusCreateRequest -func newAdapterStatusRequest(adapter string, observedGen int32, conditions []openapi.ConditionRequest, data *map[string]interface{}) 
openapi.AdapterStatusCreateRequest { +func newAdapterStatusRequest( + adapter string, observedGen int32, conditions []openapi.ConditionRequest, data *map[string]interface{}, +) openapi.AdapterStatusCreateRequest { return openapi.AdapterStatusCreateRequest{ Adapter: adapter, ObservedGeneration: observedGen, @@ -52,7 +54,10 @@ func TestClusterStatusPost(t *testing.T) { &data, ) - resp, err := client.PostClusterStatusesWithResponse(ctx, cluster.ID, openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterStatusesWithResponse( + ctx, cluster.ID, + openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Error posting cluster status: %v", err) Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) Expect(resp.JSON201).NotTo(BeNil()) @@ -85,7 +90,10 @@ func TestClusterStatusGet(t *testing.T) { }, nil, ) - _, err := client.PostClusterStatusesWithResponse(ctx, cluster.ID, openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + _, err := client.PostClusterStatusesWithResponse( + ctx, cluster.ID, + openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) } @@ -129,7 +137,10 @@ func TestNodePoolStatusPost(t *testing.T) { ) // Use nodePool.OwnerID as the cluster_id parameter - resp, err := client.PostNodePoolStatusesWithResponse(ctx, nodePool.OwnerID, nodePool.ID, openapi.PostNodePoolStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + resp, err := client.PostNodePoolStatusesWithResponse( + ctx, nodePool.OwnerID, nodePool.ID, + openapi.PostNodePoolStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Error posting nodepool status: %v", err) Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) Expect(resp.JSON201).NotTo(BeNil()) @@ -162,7 +173,10 @@ func TestNodePoolStatusGet(t *testing.T) { nil, ) // Use nodePool.OwnerID as the cluster_id parameter - _, err := client.PostNodePoolStatusesWithResponse(ctx, nodePool.OwnerID, nodePool.ID, openapi.PostNodePoolStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + _, err := client.PostNodePoolStatusesWithResponse( + ctx, nodePool.OwnerID, nodePool.ID, + openapi.PostNodePoolStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) } @@ -198,7 +212,10 @@ func TestAdapterStatusPaging(t *testing.T) { }, nil, ) - _, err := client.PostClusterStatusesWithResponse(ctx, cluster.ID, openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + _, err := client.PostClusterStatusesWithResponse( + ctx, cluster.ID, + openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) } @@ -244,7 +261,10 @@ func TestAdapterStatusIdempotency(t *testing.T) { &data1, ) - resp1, err := client.PostClusterStatusesWithResponse(ctx, cluster.ID, openapi.PostClusterStatusesJSONRequestBody(statusInput1), test.WithAuthToken(ctx)) + resp1, err := client.PostClusterStatusesWithResponse( + ctx, cluster.ID, + openapi.PostClusterStatusesJSONRequestBody(statusInput1), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) Expect(resp1.StatusCode()).To(Equal(http.StatusCreated)) Expect(resp1.JSON201).NotTo(BeNil()) @@ -268,7 +288,10 @@ func TestAdapterStatusIdempotency(t *testing.T) { &data2, ) - resp2, err := client.PostClusterStatusesWithResponse(ctx, cluster.ID, 
openapi.PostClusterStatusesJSONRequestBody(statusInput2), test.WithAuthToken(ctx)) + resp2, err := client.PostClusterStatusesWithResponse( + ctx, cluster.ID, + openapi.PostClusterStatusesJSONRequestBody(statusInput2), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) Expect(resp2.StatusCode()).To(Equal(http.StatusCreated)) Expect(resp2.JSON201).NotTo(BeNil()) @@ -292,7 +315,8 @@ func TestAdapterStatusIdempotency(t *testing.T) { // Verify: should have exactly ONE entry for this adapter (updated, not duplicated) Expect(adapterCount).To(Equal(1), "Adapter should be updated, not duplicated") - Expect(finalStatus.Conditions[0].Status).To(Equal(openapi.AdapterConditionStatusTrue), "Conditions should be updated to latest") + Expect(finalStatus.Conditions[0].Status). + To(Equal(openapi.AdapterConditionStatusTrue), "Conditions should be updated to latest") } // TestClusterStatusPost_UnknownReturns204 tests that posting Unknown Available status returns 204 No Content @@ -320,9 +344,13 @@ func TestClusterStatusPost_UnknownReturns204(t *testing.T) { nil, ) - resp, err := client.PostClusterStatusesWithResponse(ctx, cluster.ID, openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterStatusesWithResponse( + ctx, cluster.ID, + openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Error posting cluster status: %v", err) - Expect(resp.StatusCode()).To(Equal(http.StatusNoContent), "Expected 204 No Content for Unknown status") + Expect(resp.StatusCode()). + To(Equal(http.StatusNoContent), "Expected 204 No Content for Unknown status") // Verify the status was NOT stored listResp, err := client.GetClusterStatusesWithResponse(ctx, cluster.ID, nil, test.WithAuthToken(ctx)) @@ -360,22 +388,30 @@ func TestNodePoolStatusPost_UnknownReturns204(t *testing.T) { nil, ) - resp, err := client.PostNodePoolStatusesWithResponse(ctx, nodePool.OwnerID, nodePool.ID, openapi.PostNodePoolStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + resp, err := client.PostNodePoolStatusesWithResponse( + ctx, nodePool.OwnerID, nodePool.ID, + openapi.PostNodePoolStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Error posting nodepool status: %v", err) - Expect(resp.StatusCode()).To(Equal(http.StatusNoContent), "Expected 204 No Content for Unknown status") + Expect(resp.StatusCode()). 
+ To(Equal(http.StatusNoContent), "Expected 204 No Content for Unknown status") // Verify the status was NOT stored - listResp, err := client.GetNodePoolsStatusesWithResponse(ctx, nodePool.OwnerID, nodePool.ID, nil, test.WithAuthToken(ctx)) + listResp, err := client.GetNodePoolsStatusesWithResponse( + ctx, nodePool.OwnerID, nodePool.ID, nil, test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) Expect(listResp.JSON200).NotTo(BeNil()) // Check that no adapter status with "test-nodepool-adapter-unknown" exists for _, s := range listResp.JSON200.Items { - Expect(s.Adapter).NotTo(Equal("test-nodepool-adapter-unknown"), "Unknown status should not be stored") + Expect(s.Adapter).NotTo(Equal("test-nodepool-adapter-unknown"), + "Unknown status should not be stored") } } -// TestClusterStatusPost_MultipleConditionsWithUnknownAvailable tests that Unknown Available is detected among multiple conditions +// TestClusterStatusPost_MultipleConditionsWithUnknownAvailable tests that +// Unknown Available is detected among multiple conditions func TestClusterStatusPost_MultipleConditionsWithUnknownAvailable(t *testing.T) { h, client := test.RegisterIntegration(t) @@ -408,9 +444,13 @@ func TestClusterStatusPost_MultipleConditionsWithUnknownAvailable(t *testing.T) nil, ) - resp, err := client.PostClusterStatusesWithResponse(ctx, cluster.ID, openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterStatusesWithResponse( + ctx, cluster.ID, + openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Error posting cluster status: %v", err) - Expect(resp.StatusCode()).To(Equal(http.StatusNoContent), "Expected 204 No Content when Available=Unknown among multiple conditions") + Expect(resp.StatusCode()).To(Equal(http.StatusNoContent), + "Expected 204 No Content when Available=Unknown among multiple conditions") } // TestAdapterStatusPagingEdgeCases tests edge cases in pagination @@ -437,7 +477,10 @@ func TestAdapterStatusPagingEdgeCases(t *testing.T) { }, nil, ) - _, err := client.PostClusterStatusesWithResponse(ctx, cluster.ID, openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx)) + _, err := client.PostClusterStatusesWithResponse( + ctx, cluster.ID, + openapi.PostClusterStatusesJSONRequestBody(statusInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) } @@ -479,7 +522,10 @@ func TestAdapterStatusPagingEdgeCases(t *testing.T) { }, nil, ) - _, err = client.PostClusterStatusesWithResponse(ctx, singleCluster.ID, openapi.PostClusterStatusesJSONRequestBody(singleStatus), test.WithAuthToken(ctx)) + _, err = client.PostClusterStatusesWithResponse( + ctx, singleCluster.ID, + openapi.PostClusterStatusesJSONRequestBody(singleStatus), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) singleResp, err := client.GetClusterStatusesWithResponse(ctx, singleCluster.ID, nil, test.WithAuthToken(ctx)) diff --git a/test/integration/clusters_test.go b/test/integration/clusters_test.go index 1263371..ec672b2 100644 --- a/test/integration/clusters_test.go +++ b/test/integration/clusters_test.go @@ -65,7 +65,9 @@ func TestClusterPost(t *testing.T) { } // 201 Created - resp, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), 
"Error posting object: %v", err) Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) @@ -218,19 +220,26 @@ func TestClusterDuplicateNames(t *testing.T) { Spec: map[string]interface{}{"test": "spec1"}, } - resp, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) id1 := *resp.JSON201.Id // Create second cluster with the SAME name // Names are unique, so this should return 409 Conflict - resp, err = client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx)) + resp, err = client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) - Expect(resp.StatusCode()).To(Equal(http.StatusConflict), "Expected 409 Conflict for duplicate name") + Expect(resp.StatusCode()). + To(Equal(http.StatusConflict), "Expected 409 Conflict for duplicate name") // Verify first cluster still exists - getResp, err := client.GetClusterByIdWithResponse(ctx, id1, nil, test.WithAuthToken(ctx)) + getResp, err := client.GetClusterByIdWithResponse( + ctx, id1, nil, test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) Expect(getResp.JSON200.Name).To(Equal("duplicate-name-test")) } @@ -254,7 +263,9 @@ func TestClusterBoundaryValues(t *testing.T) { Spec: map[string]interface{}{"test": "spec"}, } - resp, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(longNameInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(longNameInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Should accept name up to 63 characters") Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) Expect(resp.JSON201.Name).To(Equal(longName)) @@ -266,9 +277,12 @@ func TestClusterBoundaryValues(t *testing.T) { Name: tooLongName, Spec: map[string]interface{}{"test": "spec"}, } - resp, err = client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(tooLongInput), test.WithAuthToken(ctx)) + resp, err = client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(tooLongInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) - Expect(resp.StatusCode()).To(Equal(http.StatusBadRequest), "Should reject name exceeding 63 characters") + Expect(resp.StatusCode()). + To(Equal(http.StatusBadRequest), "Should reject name exceeding 63 characters") // Test 2: Empty name emptyNameInput := openapi.ClusterCreateRequest{ @@ -277,9 +291,12 @@ func TestClusterBoundaryValues(t *testing.T) { Spec: map[string]interface{}{"test": "spec"}, } - resp, err = client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(emptyNameInput), test.WithAuthToken(ctx)) + resp, err = client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(emptyNameInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) - Expect(resp.StatusCode()).To(Equal(http.StatusBadRequest), "Should reject empty name") + Expect(resp.StatusCode()). 
+ To(Equal(http.StatusBadRequest), "Should reject empty name") // Test 3: Large spec JSON (test with ~10KB JSON) largeSpec := make(map[string]interface{}) @@ -293,12 +310,16 @@ func TestClusterBoundaryValues(t *testing.T) { Spec: largeSpec, } - resp, err = client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(largeSpecInput), test.WithAuthToken(ctx)) + resp, err = client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(largeSpecInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Should accept large spec JSON") Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) // Verify the spec was stored correctly - getResp, err := client.GetClusterByIdWithResponse(ctx, *resp.JSON201.Id, nil, test.WithAuthToken(ctx)) + getResp, err := client.GetClusterByIdWithResponse( + ctx, *resp.JSON201.Id, nil, test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) Expect(len(getResp.JSON200.Spec)).To(Equal(100)) @@ -309,9 +330,12 @@ func TestClusterBoundaryValues(t *testing.T) { Spec: map[string]interface{}{"test": "spec"}, } - resp, err = client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(unicodeNameInput), test.WithAuthToken(ctx)) + resp, err = client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(unicodeNameInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) - Expect(resp.StatusCode()).To(Equal(http.StatusBadRequest), "Should reject unicode in name (pattern is ^[a-z0-9-]+$)") + Expect(resp.StatusCode()).To(Equal(http.StatusBadRequest), + "Should reject unicode in name (pattern is ^[a-z0-9-]+$)") } // TestClusterSchemaValidation tests schema validation for cluster specs @@ -336,7 +360,9 @@ func TestClusterSchemaValidation(t *testing.T) { Spec: validSpec, } - resp, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(validInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(validInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Valid spec should be accepted") Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) Expect(*resp.JSON201.Id).NotTo(BeEmpty()) @@ -377,7 +403,9 @@ func TestClusterSchemaValidation(t *testing.T) { Spec: map[string]interface{}{}, } - resp3, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(emptySpecInput), test.WithAuthToken(ctx)) + resp3, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(emptySpecInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Empty spec should be accepted by base schema") Expect(resp3.StatusCode()).To(Equal(http.StatusCreated)) Expect(*resp3.JSON201.Id).NotTo(BeEmpty()) @@ -419,9 +447,12 @@ func TestClusterSchemaValidationWithProviderSchema(t *testing.T) { Spec: invalidSpec, } - resp, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(invalidInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(invalidInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) - Expect(resp.StatusCode()).To(Equal(http.StatusBadRequest), "Should reject spec with missing required field") + Expect(resp.StatusCode()).To(Equal(http.StatusBadRequest), + "Should reject spec with missing required field") // Parse error response to verify field-level details bodyBytes, err := io.ReadAll(resp.HTTPResponse.Body) @@ -523,7 +554,9 @@ func TestClusterList_DefaultSorting(t 
*testing.T) { Spec: map[string]interface{}{"test": fmt.Sprintf("value-%d", i)}, } - resp, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx)) + resp, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Failed to create cluster %d", i) createdClusters = append(createdClusters, *resp.JSON201) @@ -532,7 +565,9 @@ func TestClusterList_DefaultSorting(t *testing.T) { } // List clusters without orderBy parameter - should default to created_time desc - listResp, err := client.GetClustersWithResponse(ctx, nil, test.WithAuthToken(ctx)) + listResp, err := client.GetClustersWithResponse( + ctx, nil, test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Failed to list clusters") list := listResp.JSON200 Expect(list).NotTo(BeNil()) @@ -582,7 +617,9 @@ func TestClusterList_OrderByName(t *testing.T) { Spec: map[string]interface{}{"test": "value"}, } - _, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx)) + _, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Failed to create cluster %s", name) } @@ -638,7 +675,9 @@ func TestClusterList_OrderByNameDesc(t *testing.T) { Spec: map[string]interface{}{"test": "value"}, } - _, err := client.PostClusterWithResponse(ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx)) + _, err := client.PostClusterWithResponse( + ctx, openapi.PostClusterJSONRequestBody(clusterInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Failed to create cluster %s", name) } diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index a784e41..5f22290 100755 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -37,7 +37,8 @@ func TestMain(m *testing.M) { // Verify the schema file exists before setting the env var if _, err := os.Stat(schemaPath); err != nil { - logger.With(ctx, logger.FieldSchemaPath, schemaPath).WithError(err).Warn("Schema file not found, skipping OPENAPI_SCHEMA_PATH setup") + logger.With(ctx, logger.FieldSchemaPath, schemaPath).WithError(err). 
+ Warn("Schema file not found, skipping OPENAPI_SCHEMA_PATH setup") } else { _ = os.Setenv("OPENAPI_SCHEMA_PATH", schemaPath) logger.With(ctx, logger.FieldSchemaPath, schemaPath).Info("Set OPENAPI_SCHEMA_PATH for integration tests") diff --git a/test/integration/metadata_test.go b/test/integration/metadata_test.go index 96779b6..2e44d5d 100755 --- a/test/integration/metadata_test.go +++ b/test/integration/metadata_test.go @@ -29,13 +29,18 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-api/test" ) +const ( + protocolHTTP = "http" + protocolHTTPS = "https" +) + func TestMetadataGet(t *testing.T) { h, _ := test.RegisterIntegration(t) // Build the metadata URL (metadata endpoint is at /api/hyperfleet, not /api/hyperfleet/v1) - protocol := "http" + protocol := protocolHTTP if h.AppConfig.Server.EnableHTTPS { - protocol = "https" + protocol = protocolHTTPS } metadataURL := fmt.Sprintf("%s://%s/api/hyperfleet", protocol, h.AppConfig.Server.BindAddress) diff --git a/test/integration/node_pools_test.go b/test/integration/node_pools_test.go index d90601f..c6796f4 100644 --- a/test/integration/node_pools_test.go +++ b/test/integration/node_pools_test.go @@ -66,7 +66,9 @@ func TestNodePoolPost(t *testing.T) { } // 201 Created - resp, err := client.CreateNodePoolWithResponse(ctx, cluster.ID, openapi.CreateNodePoolJSONRequestBody(nodePoolInput), test.WithAuthToken(ctx)) + resp, err := client.CreateNodePoolWithResponse( + ctx, cluster.ID, openapi.CreateNodePoolJSONRequestBody(nodePoolInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Error posting object: %v", err) Expect(resp.StatusCode()).To(Equal(http.StatusCreated)) @@ -74,7 +76,8 @@ func TestNodePoolPost(t *testing.T) { Expect(nodePoolOutput).NotTo(BeNil()) Expect(*nodePoolOutput.Id).NotTo(BeEmpty(), "Expected ID assigned on creation") Expect(*nodePoolOutput.Kind).To(Equal("NodePool")) - Expect(*nodePoolOutput.Href).To(Equal(fmt.Sprintf("/api/hyperfleet/v1/clusters/%s/nodepools/%s", cluster.ID, *nodePoolOutput.Id))) + Expect(*nodePoolOutput.Href). + To(Equal(fmt.Sprintf("/api/hyperfleet/v1/clusters/%s/nodepools/%s", cluster.ID, *nodePoolOutput.Id))) // 400 bad request. posting junk json is one way to trigger 400. jwtToken := test.GetAccessTokenFromContext(ctx) @@ -194,7 +197,9 @@ func TestGetNodePoolByClusterIdAndNodePoolId(t *testing.T) { Spec: map[string]interface{}{"instance_type": "m5.large", "replicas": 2}, } - createResp, err := client.CreateNodePoolWithResponse(ctx, cluster.ID, openapi.CreateNodePoolJSONRequestBody(nodePoolInput), test.WithAuthToken(ctx)) + createResp, err := client.CreateNodePoolWithResponse( + ctx, cluster.ID, openapi.CreateNodePoolJSONRequestBody(nodePoolInput), test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred(), "Error creating nodepool: %v", err) Expect(createResp.StatusCode()).To(Equal(http.StatusCreated)) Expect(*createResp.JSON201.Id).NotTo(BeEmpty()) @@ -214,20 +219,27 @@ func TestGetNodePoolByClusterIdAndNodePoolId(t *testing.T) { // Test 2: Try to get with non-existent nodepool ID (404) notFoundResp, err := client.GetNodePoolByIdWithResponse(ctx, cluster.ID, "non-existent-id", test.WithAuthToken(ctx)) Expect(err).NotTo(HaveOccurred()) - Expect(notFoundResp.StatusCode()).To(Equal(http.StatusNotFound), "Expected 404 for non-existent nodepool") + Expect(notFoundResp.StatusCode()). 
+ To(Equal(http.StatusNotFound), "Expected 404 for non-existent nodepool") // Test 3: Try to get with non-existent cluster ID (404) - notFoundResp, err = client.GetNodePoolByIdWithResponse(ctx, "non-existent-cluster", nodePoolID, test.WithAuthToken(ctx)) + notFoundResp, err = client.GetNodePoolByIdWithResponse( + ctx, "non-existent-cluster", nodePoolID, test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) - Expect(notFoundResp.StatusCode()).To(Equal(http.StatusNotFound), "Expected 404 for non-existent cluster") + Expect(notFoundResp.StatusCode()). + To(Equal(http.StatusNotFound), "Expected 404 for non-existent cluster") // Test 4: Create another cluster and verify that nodepool is not accessible from wrong cluster cluster2, err := h.Factories.NewClusters(h.NewID()) Expect(err).NotTo(HaveOccurred()) - wrongClusterResp, err := client.GetNodePoolByIdWithResponse(ctx, cluster2.ID, nodePoolID, test.WithAuthToken(ctx)) + wrongClusterResp, err := client.GetNodePoolByIdWithResponse( + ctx, cluster2.ID, nodePoolID, test.WithAuthToken(ctx), + ) Expect(err).NotTo(HaveOccurred()) - Expect(wrongClusterResp.StatusCode()).To(Equal(http.StatusNotFound), "Expected 404 when accessing nodepool from wrong cluster") + Expect(wrongClusterResp.StatusCode()).To(Equal(http.StatusNotFound), + "Expected 404 when accessing nodepool from wrong cluster") } // TestNodePoolPost_EmptyKind tests that empty kind field returns 400 diff --git a/test/mocks/jwk_cert_server.go b/test/mocks/jwk_cert_server.go index 73ae298..3cca277 100755 --- a/test/mocks/jwk_cert_server.go +++ b/test/mocks/jwk_cert_server.go @@ -14,7 +14,12 @@ const ( certEndpoint = "/auth/realms/rhd/protocol/openid-connect/certs" ) -func NewJWKCertServerMock(t *testing.T, pubKey crypto.PublicKey, jwkKID string, jwkAlg string) (url string, teardown func() error) { +func NewJWKCertServerMock( + t *testing.T, + pubKey crypto.PublicKey, + jwkKID string, + jwkAlg string, +) (url string, teardown func() error) { certHandler := http.NewServeMux() certHandler.HandleFunc(certEndpoint, func(w http.ResponseWriter, r *http.Request) { diff --git a/test/mocks/ocm.go b/test/mocks/ocm.go index ff8ce78..0f77de3 100755 --- a/test/mocks/ocm.go +++ b/test/mocks/ocm.go @@ -47,13 +47,19 @@ func NewOCMAuthzValidatorMockClient() (*OCMAuthzValidatorMock, *ocm.Client) { return authz, client } -func (m *OCMAuthzValidatorMock) SelfAccessReview(ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { +func (m *OCMAuthzValidatorMock) SelfAccessReview( + ctx context.Context, + action, resourceType, organizationID, subscriptionID, clusterID string, +) (allowed bool, err error) { m.Action = action m.ResourceType = resourceType return true, nil } -func (m *OCMAuthzValidatorMock) AccessReview(ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { +func (m *OCMAuthzValidatorMock) AccessReview( + ctx context.Context, + username, action, resourceType, organizationID, subscriptionID, clusterID string, +) (allowed bool, err error) { m.Action = action m.ResourceType = resourceType return true, nil
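
The hunks in this patch fall into a few recurring lint-fix patterns. The sampler change in pkg/telemetry/otel.go replaces an if/else-if chain with a tagless switch, which is the rewrite gocritic's ifElseChain check expects. A minimal sketch of the same three-way branch follows; pickSampler is a hypothetical helper used only for illustration (the patched code assigns to a local sampler variable inside InitTraceProvider instead):

    package telemetry // hypothetical package name for this sketch

    import sdktrace "go.opentelemetry.io/otel/sdk/trace"

    // pickSampler mirrors the tagless switch introduced in the patch: the
    // same three cases, expressed as a switch instead of if / else if / else,
    // with no change in behavior.
    func pickSampler(samplingRate float64) sdktrace.Sampler {
        switch {
        case samplingRate >= 1.0:
            return sdktrace.AlwaysSample() // sample every trace
        case samplingRate <= 0.0:
            return sdktrace.NeverSample() // sample nothing
        default:
            return sdktrace.TraceIDRatioBased(samplingRate) // probabilistic sampling
        }
    }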
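
The permission changes in pkg/validators/schema_validator_test.go (0644 to 0600 on os.WriteFile) address gosec's G306 rule, which expects files written by the program to use 0600 or tighter permissions. A self-contained sketch of the same call, with hypothetical paths and file contents:

    package main

    import (
        "log"
        "os"
        "path/filepath"
    )

    func main() {
        dir, err := os.MkdirTemp("", "schema-example")
        if err != nil {
            log.Fatal(err)
        }
        defer os.RemoveAll(dir)

        // 0600 (owner read/write only) passes gosec's G306 check; 0644 is
        // reported as overly permissive for a file the test itself creates.
        path := filepath.Join(dir, "test-schema.yaml")
        if err := os.WriteFile(path, []byte("openapi: 3.0.0\n"), 0600); err != nil {
            log.Fatal(err)
        }
        log.Printf("wrote %s", path)
    }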
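
The string-literal replacements follow goconst: "http" and "https" become protocolHTTP/protocolHTTPS in metadata_test.go, and the status-aggregation hunks switch to conditionTypeAvailable and conditionTypeReady. Those two constants are only referenced in this patch, so their declaration is an assumption; something along these lines would match the usage:

    package services // assumed location; the patch does not show where the constants are defined

    const (
        conditionTypeAvailable = "Available" // replaces repeated "Available" literals
        conditionTypeReady     = "Ready"     // replaces repeated "Ready" literals
    )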
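
Most of the remaining hunks only re-wrap long signatures and call sites to stay under the configured lll line-length limit, and split Gomega assertion chains after the dot (Expect(resp.StatusCode()). on one line, To(...) on the next), which is valid Go and changes no behavior. A trivial, hypothetical illustration of the wrapping style; none of these identifiers come from the repository:

    package main

    import "fmt"

    // describeResource is a made-up helper wrapped the way the patch wraps
    // factory and client calls: parameters grouped per line, closing
    // parenthesis and results on their own line, body unchanged.
    func describeResource(
        kind, name string,
        generation int32,
        ready bool,
    ) string {
        return fmt.Sprintf("%s/%s generation=%d ready=%t", kind, name, generation, ready)
    }

    func main() {
        fmt.Println(describeResource("Cluster", "demo", 1, true))
    }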