From 98157649831a464effcc1ee148c71938a682295a Mon Sep 17 00:00:00 2001 From: yasun Date: Wed, 7 Jan 2026 13:32:31 +0800 Subject: [PATCH 1/3] feat(logger): implement slog-based logger foundation Add slog logger with config, context & testing update --- cmd/hyperfleet-api/servecmd/cmd.go | 22 +- pkg/config/config.go | 4 + pkg/config/logging.go | 125 ++++++ pkg/config/logging_test.go | 348 ++++++++++++++++ pkg/logger/context.go | 100 +++++ pkg/logger/logger.go | 281 ++++++++++++- pkg/logger/logger_test.go | 572 +++++++++++++++++++++++++++ pkg/logger/operationid_middleware.go | 24 -- 8 files changed, 1430 insertions(+), 46 deletions(-) create mode 100644 pkg/config/logging.go create mode 100644 pkg/config/logging_test.go create mode 100644 pkg/logger/context.go create mode 100644 pkg/logger/logger_test.go diff --git a/cmd/hyperfleet-api/servecmd/cmd.go b/cmd/hyperfleet-api/servecmd/cmd.go index dce3fd3..62a82c6 100755 --- a/cmd/hyperfleet-api/servecmd/cmd.go +++ b/cmd/hyperfleet-api/servecmd/cmd.go @@ -1,11 +1,16 @@ package servecmd import ( + "context" + "log/slog" + "os" + "github.com/golang/glog" "github.com/spf13/cobra" "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/server" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) func NewServeCommand() *cobra.Command { @@ -29,6 +34,20 @@ func runServe(cmd *cobra.Command, args []string) { glog.Fatalf("Unable to initialize environment: %s", err.Error()) } + // Initialize slog logger (demonstration only, full migration in PR 3) + ctx := context.Background() + hostname, _ := os.Hostname() + logConfig := &logger.LogConfig{ + Level: slog.LevelInfo, + Format: logger.FormatJSON, + Output: os.Stdout, + Component: "api", + Version: "dev", + Hostname: hostname, + } + logger.InitGlobalLogger(logConfig) + logger.Info(ctx, "New slog logger initialized (example)", "log_level", "info", "log_format", "json") + // Run the 
servers go func() { apiserver := server.NewAPIServer() @@ -45,8 +64,5 @@ func runServe(cmd *cobra.Command, args []string) { healthcheckServer.Start() }() - // REMOVED: ControllersServer - Sentinel handles orchestration - // Controllers are no longer run inside the API service - select {} } diff --git a/pkg/config/config.go b/pkg/config/config.go index f7c44ff..d5851d9 100755 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -18,6 +18,7 @@ type ApplicationConfig struct { HealthCheck *HealthCheckConfig `json:"health_check"` Database *DatabaseConfig `json:"database"` OCM *OCMConfig `json:"ocm"` + Logging *LoggingConfig `json:"logging"` } func NewApplicationConfig() *ApplicationConfig { @@ -27,6 +28,7 @@ func NewApplicationConfig() *ApplicationConfig { HealthCheck: NewHealthCheckConfig(), Database: NewDatabaseConfig(), OCM: NewOCMConfig(), + Logging: NewLoggingConfig(), } } @@ -37,6 +39,7 @@ func (c *ApplicationConfig) AddFlags(flagset *pflag.FlagSet) { c.HealthCheck.AddFlags(flagset) c.Database.AddFlags(flagset) c.OCM.AddFlags(flagset) + c.Logging.AddFlags(flagset) } func (c *ApplicationConfig) ReadFiles() []string { @@ -49,6 +52,7 @@ func (c *ApplicationConfig) ReadFiles() []string { {c.OCM.ReadFiles, "OCM"}, {c.Metrics.ReadFiles, "Metrics"}, {c.HealthCheck.ReadFiles, "HealthCheck"}, + {c.Logging.ReadFiles, "Logging"}, } var messages []string for _, rf := range readFiles { diff --git a/pkg/config/logging.go b/pkg/config/logging.go new file mode 100644 index 0000000..896a3d8 --- /dev/null +++ b/pkg/config/logging.go @@ -0,0 +1,125 @@ +package config + +import ( + "os" + "strconv" + "strings" + + "github.com/spf13/pflag" +) + +// LoggingConfig holds logging configuration +type LoggingConfig struct { + Level string `json:"log_level"` + Format string `json:"log_format"` + Output string `json:"log_output"` + + OTel OTelConfig `json:"otel"` + Masking MaskingConfig `json:"masking"` +} + +// OTelConfig holds OpenTelemetry configuration +type OTelConfig struct { + 
Enabled bool `json:"enabled"` + SamplingRate float64 `json:"sampling_rate"` +} + +// MaskingConfig holds data masking configuration +type MaskingConfig struct { + Enabled bool `json:"enabled"` + SensitiveHeaders string `json:"sensitive_headers"` + SensitiveFields string `json:"sensitive_fields"` +} + +// NewLoggingConfig creates a new LoggingConfig with default values +func NewLoggingConfig() *LoggingConfig { + return &LoggingConfig{ + Level: "info", + Format: "json", + Output: "stdout", + OTel: OTelConfig{ + Enabled: false, + SamplingRate: 1.0, + }, + Masking: MaskingConfig{ + Enabled: true, + SensitiveHeaders: "Authorization,X-API-Key,Cookie,X-Auth-Token", + SensitiveFields: "password,secret,token,api_key,access_token,refresh_token", + }, + } +} + +// AddFlags adds CLI flags for core logging configuration +func (l *LoggingConfig) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&l.Level, "log-level", l.Level, "Log level (debug, info, warn, error)") + fs.StringVar(&l.Format, "log-format", l.Format, "Log format (text, json)") + fs.StringVar(&l.Output, "log-output", l.Output, "Log output (stdout, stderr)") +} + +// ReadFiles satisfies the config interface +func (l *LoggingConfig) ReadFiles() error { + return nil +} + +// BindEnv reads configuration from environment variables +func (l *LoggingConfig) BindEnv() { + if val := os.Getenv("LOG_LEVEL"); val != "" { + l.Level = val + } + if val := os.Getenv("LOG_FORMAT"); val != "" { + l.Format = val + } + if val := os.Getenv("LOG_OUTPUT"); val != "" { + l.Output = val + } + + if val := os.Getenv("OTEL_ENABLED"); val != "" { + enabled, err := strconv.ParseBool(val) + if err == nil { + l.OTel.Enabled = enabled + } + } + if val := os.Getenv("OTEL_SAMPLING_RATE"); val != "" { + rate, err := strconv.ParseFloat(val, 64) + if err == nil { + l.OTel.SamplingRate = rate + } + } + + if val := os.Getenv("MASKING_ENABLED"); val != "" { + enabled, err := strconv.ParseBool(val) + if err == nil { + l.Masking.Enabled = enabled + } + } + if val 
:= os.Getenv("MASKING_HEADERS"); val != "" { + l.Masking.SensitiveHeaders = val + } + if val := os.Getenv("MASKING_FIELDS"); val != "" { + l.Masking.SensitiveFields = val + } +} + +// GetSensitiveHeadersList parses comma-separated sensitive headers +func (l *LoggingConfig) GetSensitiveHeadersList() []string { + if l.Masking.SensitiveHeaders == "" { + return []string{} + } + headers := strings.Split(l.Masking.SensitiveHeaders, ",") + for i := range headers { + headers[i] = strings.TrimSpace(headers[i]) + } + return headers +} + +// GetSensitiveFieldsList parses comma-separated sensitive fields +func (l *LoggingConfig) GetSensitiveFieldsList() []string { + if l.Masking.SensitiveFields == "" { + return []string{} + } + fields := strings.Split(l.Masking.SensitiveFields, ",") + for i := range fields { + fields[i] = strings.TrimSpace(fields[i]) + } + return fields +} diff --git a/pkg/config/logging_test.go b/pkg/config/logging_test.go new file mode 100644 index 0000000..66ab189 --- /dev/null +++ b/pkg/config/logging_test.go @@ -0,0 +1,348 @@ +package config + +import ( + "os" + "testing" + + "github.com/spf13/pflag" +) + +// TestNewLoggingConfig_Defaults tests default configuration values +func TestNewLoggingConfig_Defaults(t *testing.T) { + cfg := NewLoggingConfig() + + tests := []struct { + name string + got interface{} + expected interface{} + }{ + {"Level", cfg.Level, "info"}, + {"Format", cfg.Format, "json"}, + {"Output", cfg.Output, "stdout"}, + {"OTel.Enabled", cfg.OTel.Enabled, false}, + {"OTel.SamplingRate", cfg.OTel.SamplingRate, 1.0}, + {"Masking.Enabled", cfg.Masking.Enabled, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.got != tt.expected { + t.Errorf("expected %v, got %v", tt.expected, tt.got) + } + }) + } + + // Check non-empty string fields + if cfg.Masking.SensitiveHeaders == "" { + t.Error("expected default SensitiveHeaders to be non-empty") + } + if cfg.Masking.SensitiveFields == "" { + t.Error("expected 
default SensitiveFields to be non-empty") + } +} + +// TestLoggingConfig_AddFlags tests CLI flag registration +func TestLoggingConfig_AddFlags(t *testing.T) { + cfg := NewLoggingConfig() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + cfg.AddFlags(fs) + + // Verify flags are registered + flags := []string{"log-level", "log-format", "log-output"} + for _, flagName := range flags { + t.Run("flag_"+flagName, func(t *testing.T) { + if fs.Lookup(flagName) == nil { + t.Errorf("expected %s flag to be registered", flagName) + } + }) + } + + // Test flag parsing + tests := []struct { + name string + args []string + expected map[string]string + }{ + { + name: "custom values", + args: []string{"--log-level=debug", "--log-format=text", "--log-output=stderr"}, + expected: map[string]string{ + "level": "debug", + "format": "text", + "output": "stderr", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := NewLoggingConfig() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + cfg.AddFlags(fs) + + if err := fs.Parse(tt.args); err != nil { + t.Fatalf("failed to parse flags: %v", err) + } + + if cfg.Level != tt.expected["level"] { + t.Errorf("expected Level '%s', got '%s'", tt.expected["level"], cfg.Level) + } + if cfg.Format != tt.expected["format"] { + t.Errorf("expected Format '%s', got '%s'", tt.expected["format"], cfg.Format) + } + if cfg.Output != tt.expected["output"] { + t.Errorf("expected Output '%s', got '%s'", tt.expected["output"], cfg.Output) + } + }) + } +} + +// TestLoggingConfig_ReadFiles tests that ReadFiles returns nil (no file-based config) +func TestLoggingConfig_ReadFiles(t *testing.T) { + cfg := NewLoggingConfig() + if err := cfg.ReadFiles(); err != nil { + t.Errorf("expected ReadFiles to return nil, got %v", err) + } +} + +// TestLoggingConfig_BindEnv tests environment variable binding +func TestLoggingConfig_BindEnv(t *testing.T) { + tests := []struct { + name string + envVars map[string]string + 
validate func(*testing.T, *LoggingConfig) + }{ + { + name: "basic logging env vars", + envVars: map[string]string{ + "LOG_LEVEL": "debug", + "LOG_FORMAT": "text", + "LOG_OUTPUT": "stderr", + }, + validate: func(t *testing.T, cfg *LoggingConfig) { + if cfg.Level != "debug" { + t.Errorf("expected Level 'debug', got '%s'", cfg.Level) + } + if cfg.Format != "text" { + t.Errorf("expected Format 'text', got '%s'", cfg.Format) + } + if cfg.Output != "stderr" { + t.Errorf("expected Output 'stderr', got '%s'", cfg.Output) + } + }, + }, + { + name: "otel env vars", + envVars: map[string]string{ + "OTEL_ENABLED": "true", + "OTEL_SAMPLING_RATE": "0.5", + }, + validate: func(t *testing.T, cfg *LoggingConfig) { + if cfg.OTel.Enabled != true { + t.Errorf("expected OTel.Enabled true, got %t", cfg.OTel.Enabled) + } + if cfg.OTel.SamplingRate != 0.5 { + t.Errorf("expected OTel.SamplingRate 0.5, got %f", cfg.OTel.SamplingRate) + } + }, + }, + { + name: "masking env vars", + envVars: map[string]string{ + "MASKING_ENABLED": "false", + "MASKING_HEADERS": "Custom-Header,Another-Header", + "MASKING_FIELDS": "custom_field,another_field", + }, + validate: func(t *testing.T, cfg *LoggingConfig) { + if cfg.Masking.Enabled != false { + t.Errorf("expected Masking.Enabled false, got %t", cfg.Masking.Enabled) + } + if cfg.Masking.SensitiveHeaders != "Custom-Header,Another-Header" { + t.Errorf("expected custom SensitiveHeaders, got '%s'", cfg.Masking.SensitiveHeaders) + } + if cfg.Masking.SensitiveFields != "custom_field,another_field" { + t.Errorf("expected custom SensitiveFields, got '%s'", cfg.Masking.SensitiveFields) + } + }, + }, + { + name: "invalid bool value keeps default", + envVars: map[string]string{ + "OTEL_ENABLED": "not-a-bool", + }, + validate: func(t *testing.T, cfg *LoggingConfig) { + if cfg.OTel.Enabled != false { + t.Errorf("expected OTel.Enabled to keep default (false), got %t", cfg.OTel.Enabled) + } + }, + }, + { + name: "invalid float value keeps default", + envVars: 
map[string]string{ + "OTEL_SAMPLING_RATE": "not-a-float", + }, + validate: func(t *testing.T, cfg *LoggingConfig) { + if cfg.OTel.SamplingRate != 1.0 { + t.Errorf("expected OTel.SamplingRate to keep default (1.0), got %f", cfg.OTel.SamplingRate) + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Save and restore env vars + oldEnvs := make(map[string]string) + for key := range tt.envVars { + oldEnvs[key] = os.Getenv(key) + } + defer func() { + for key, val := range oldEnvs { + if val == "" { + _ = os.Unsetenv(key) + } else { + _ = os.Setenv(key, val) + } + } + }() + + // Set env vars + for key, val := range tt.envVars { + if err := os.Setenv(key, val); err != nil { + t.Fatalf("failed to set env var %s: %v", key, err) + } + } + + cfg := NewLoggingConfig() + cfg.BindEnv() + + tt.validate(t, cfg) + }) + } +} + +// TestLoggingConfig_GetSensitiveHeadersList tests parsing of comma-separated headers +func TestLoggingConfig_GetSensitiveHeadersList(t *testing.T) { + tests := []struct { + name string + input string + expected []string + }{ + { + name: "standard list", + input: "Authorization,X-API-Key,Cookie", + expected: []string{"Authorization", "X-API-Key", "Cookie"}, + }, + { + name: "with whitespace", + input: " Authorization , X-API-Key , Cookie ", + expected: []string{"Authorization", "X-API-Key", "Cookie"}, + }, + { + name: "empty string", + input: "", + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := NewLoggingConfig() + cfg.Masking.SensitiveHeaders = tt.input + + headers := cfg.GetSensitiveHeadersList() + + if len(headers) != len(tt.expected) { + t.Fatalf("expected %d headers, got %d", len(tt.expected), len(headers)) + } + for i, h := range tt.expected { + if headers[i] != h { + t.Errorf("expected header[%d] '%s', got '%s'", i, h, headers[i]) + } + } + }) + } +} + +// TestLoggingConfig_GetSensitiveFieldsList tests parsing of comma-separated fields +func 
TestLoggingConfig_GetSensitiveFieldsList(t *testing.T) { + tests := []struct { + name string + input string + expected []string + }{ + { + name: "standard list", + input: "password,secret,token", + expected: []string{"password", "secret", "token"}, + }, + { + name: "with whitespace", + input: " password , secret , token ", + expected: []string{"password", "secret", "token"}, + }, + { + name: "empty string", + input: "", + expected: []string{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := NewLoggingConfig() + cfg.Masking.SensitiveFields = tt.input + + fields := cfg.GetSensitiveFieldsList() + + if len(fields) != len(tt.expected) { + t.Fatalf("expected %d fields, got %d", len(tt.expected), len(fields)) + } + for i, f := range tt.expected { + if fields[i] != f { + t.Errorf("expected field[%d] '%s', got '%s'", i, f, fields[i]) + } + } + }) + } +} + +// TestLoggingConfig_EnvOverridesFlags tests that environment variables override CLI flags +func TestLoggingConfig_EnvOverridesFlags(t *testing.T) { + // Save and restore env var + oldLevel := os.Getenv("LOG_LEVEL") + defer func() { + if oldLevel == "" { + _ = os.Unsetenv("LOG_LEVEL") + } else { + _ = os.Setenv("LOG_LEVEL", oldLevel) + } + }() + + // Set env var + if err := os.Setenv("LOG_LEVEL", "error"); err != nil { + t.Fatalf("failed to set LOG_LEVEL: %v", err) + } + + cfg := NewLoggingConfig() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + cfg.AddFlags(fs) + + // Parse flags with different value + args := []string{"--log-level=debug"} + if err := fs.Parse(args); err != nil { + t.Fatalf("failed to parse flags: %v", err) + } + + // Before BindEnv, should have flag value + if cfg.Level != "debug" { + t.Errorf("expected Level 'debug' from flag, got '%s'", cfg.Level) + } + + // After BindEnv, env var should override + cfg.BindEnv() + if cfg.Level != "error" { + t.Errorf("expected Level 'error' from env (after BindEnv), got '%s'", cfg.Level) + } +} diff --git 
a/pkg/logger/context.go b/pkg/logger/context.go new file mode 100644 index 0000000..6e72082 --- /dev/null +++ b/pkg/logger/context.go @@ -0,0 +1,100 @@ +package logger + +import ( + "context" + + "github.com/segmentio/ksuid" +) + +// Context key types for type safety +type OperationIDKey string +type TraceIDKey string +type SpanIDKey string +type ClusterIDKey string +type ResourceTypeKey string +type ResourceIDKey string + +// Context keys for storing correlation fields +const ( + OpIDKey OperationIDKey = "opID" // Keep existing value for compatibility + OpIDHeader OperationIDKey = "X-Operation-ID" // HTTP header for operation ID + TraceIDCtxKey TraceIDKey = "trace_id" + SpanIDCtxKey SpanIDKey = "span_id" + ClusterIDCtxKey ClusterIDKey = "cluster_id" + ResourceTypeCtxKey ResourceTypeKey = "resource_type" + ResourceIDCtxKey ResourceIDKey = "resource_id" +) + +// WithTraceID adds trace ID to context +func WithTraceID(ctx context.Context, traceID string) context.Context { + return context.WithValue(ctx, TraceIDCtxKey, traceID) +} + +// WithSpanID adds span ID to context +func WithSpanID(ctx context.Context, spanID string) context.Context { + return context.WithValue(ctx, SpanIDCtxKey, spanID) +} + +// WithClusterID adds cluster ID to context +func WithClusterID(ctx context.Context, clusterID string) context.Context { + return context.WithValue(ctx, ClusterIDCtxKey, clusterID) +} + +// WithResourceType adds resource type to context +func WithResourceType(ctx context.Context, resourceType string) context.Context { + return context.WithValue(ctx, ResourceTypeCtxKey, resourceType) +} + +// WithResourceID adds resource ID to context +func WithResourceID(ctx context.Context, resourceID string) context.Context { + return context.WithValue(ctx, ResourceIDCtxKey, resourceID) +} + +// WithOpID adds operation ID to context +// If operation ID already exists in context, it returns the context unchanged +// Otherwise, it generates a new KSUID and adds it to the context +func 
WithOpID(ctx context.Context) context.Context { + if ctx.Value(OpIDKey) != nil { + return ctx + } + opID := ksuid.New().String() + return context.WithValue(ctx, OpIDKey, opID) +} + +// GetOperationID retrieves operation ID from context +func GetOperationID(ctx context.Context) string { + if opID, ok := ctx.Value(OpIDKey).(string); ok { + return opID + } + return "" +} + +// GetTraceID retrieves trace ID from context +func GetTraceID(ctx context.Context) (string, bool) { + traceID, ok := ctx.Value(TraceIDCtxKey).(string) + return traceID, ok +} + +// GetSpanID retrieves span ID from context +func GetSpanID(ctx context.Context) (string, bool) { + spanID, ok := ctx.Value(SpanIDCtxKey).(string) + return spanID, ok +} + +// GetClusterID retrieves cluster ID from context +func GetClusterID(ctx context.Context) (string, bool) { + clusterID, ok := ctx.Value(ClusterIDCtxKey).(string) + return clusterID, ok +} + +// GetResourceType retrieves resource type from context +func GetResourceType(ctx context.Context) (string, bool) { + resourceType, ok := ctx.Value(ResourceTypeCtxKey).(string) + return resourceType, ok +} + +// GetResourceID retrieves resource ID from context +func GetResourceID(ctx context.Context) (string, bool) { + resourceID, ok := ctx.Value(ResourceIDCtxKey).(string) + return resourceID, ok +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index dafca42..107b9a9 100755 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -3,12 +3,256 @@ package logger import ( "context" "fmt" + "io" + "log/slog" + "os" + "runtime" "strings" "github.com/golang/glog" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/util" ) +// LogFormat enumeration for output format +type LogFormat int + +const ( + // FormatText outputs logs in human-readable text format + FormatText LogFormat = iota + // FormatJSON outputs logs in JSON format for structured logging + FormatJSON +) + +// LogConfig holds the configuration for the logger +type LogConfig struct { + Level slog.Level 
+ Format LogFormat + Output io.Writer + Component string + Version string + Hostname string +} + +// HyperFleetHandler implements slog.Handler interface +// Adds HyperFleet-specific fields: component, version, hostname, trace_id, span_id, etc. +type HyperFleetHandler struct { + handler slog.Handler + component string + version string + hostname string +} + +// NewHyperFleetHandler creates a custom slog handler with HyperFleet-specific fields +func NewHyperFleetHandler(cfg *LogConfig) *HyperFleetHandler { + var baseHandler slog.Handler + opts := &slog.HandlerOptions{ + Level: cfg.Level, + AddSource: true, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + if a.Key == slog.TimeKey { + return slog.Attr{Key: "timestamp", Value: a.Value} + } + if a.Key == slog.LevelKey { + level := a.Value.Any().(slog.Level) + return slog.Attr{Key: "level", Value: slog.StringValue(strings.ToLower(level.String()))} + } + if a.Key == slog.MessageKey { + return slog.Attr{Key: "message", Value: a.Value} + } + if a.Key == slog.SourceKey { + return a + } + return a + }, + } + + if cfg.Format == FormatJSON { + baseHandler = slog.NewJSONHandler(cfg.Output, opts) + } else { + baseHandler = slog.NewTextHandler(cfg.Output, opts) + } + + return &HyperFleetHandler{ + handler: baseHandler, + component: cfg.Component, + version: cfg.Version, + hostname: cfg.Hostname, + } +} + +// Handle implements slog.Handler interface +func (h *HyperFleetHandler) Handle(ctx context.Context, r slog.Record) error { + r.AddAttrs( + slog.String("component", h.component), + slog.String("version", h.version), + slog.String("hostname", h.hostname), + ) + + if traceID, ok := ctx.Value(TraceIDCtxKey).(string); ok && traceID != "" { + r.AddAttrs(slog.String("trace_id", traceID)) + } + if spanID, ok := ctx.Value(SpanIDCtxKey).(string); ok && spanID != "" { + r.AddAttrs(slog.String("span_id", spanID)) + } + if requestID, ok := ctx.Value(OpIDKey).(string); ok && requestID != "" { + 
r.AddAttrs(slog.String("request_id", requestID)) + } + if clusterID, ok := ctx.Value(ClusterIDCtxKey).(string); ok && clusterID != "" { + r.AddAttrs(slog.String("cluster_id", clusterID)) + } + if resourceType, ok := ctx.Value(ResourceTypeCtxKey).(string); ok && resourceType != "" { + r.AddAttrs(slog.String("resource_type", resourceType)) + } + if resourceID, ok := ctx.Value(ResourceIDCtxKey).(string); ok && resourceID != "" { + r.AddAttrs(slog.String("resource_id", resourceID)) + } + + if r.Level >= slog.LevelError { + stackTrace := captureStackTrace(4) + r.AddAttrs(slog.Any("stack_trace", stackTrace)) + r.AddAttrs(slog.String("error", r.Message)) + } + + return h.handler.Handle(ctx, r) +} + +// Enabled implements slog.Handler interface +func (h *HyperFleetHandler) Enabled(ctx context.Context, level slog.Level) bool { + return h.handler.Enabled(ctx, level) +} + +// WithAttrs implements slog.Handler interface +func (h *HyperFleetHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return &HyperFleetHandler{ + handler: h.handler.WithAttrs(attrs), + component: h.component, + version: h.version, + hostname: h.hostname, + } +} + +// WithGroup implements slog.Handler interface +func (h *HyperFleetHandler) WithGroup(name string) slog.Handler { + return &HyperFleetHandler{ + handler: h.handler.WithGroup(name), + component: h.component, + version: h.version, + hostname: h.hostname, + } +} + +func captureStackTrace(skip int) []string { + const maxFrames = 15 + pcs := make([]uintptr, maxFrames) + n := runtime.Callers(skip, pcs) + + frames := runtime.CallersFrames(pcs[:n]) + var stackTrace []string + for { + frame, more := frames.Next() + if !strings.Contains(frame.Function, "runtime.") && + !strings.Contains(frame.Function, "testing.") { + stackTrace = append(stackTrace, + fmt.Sprintf("%s:%d %s", frame.File, frame.Line, frame.Function)) + } + if !more { + break + } + } + return stackTrace +} + +var globalLogger *slog.Logger + +// InitGlobalLogger initializes the global 
logger with the given configuration +func InitGlobalLogger(cfg *LogConfig) { + handler := NewHyperFleetHandler(cfg) + globalLogger = slog.New(handler) + slog.SetDefault(globalLogger) +} + +// GetLogger returns a context-aware logger +func GetLogger(ctx context.Context) *slog.Logger { + if globalLogger == nil { + return slog.Default() + } + return globalLogger +} + +// ParseLogLevel converts string to slog.Level +func ParseLogLevel(level string) (slog.Level, error) { + switch strings.ToLower(strings.TrimSpace(level)) { + case "debug": + return slog.LevelDebug, nil + case "info": + return slog.LevelInfo, nil + case "warn", "warning": + return slog.LevelWarn, nil + case "error": + return slog.LevelError, nil + default: + return slog.LevelInfo, fmt.Errorf("unknown log level: %s (valid: debug, info, warn, error)", level) + } +} + +// ParseLogFormat converts string to LogFormat +func ParseLogFormat(format string) (LogFormat, error) { + switch strings.ToLower(strings.TrimSpace(format)) { + case "text": + return FormatText, nil + case "json": + return FormatJSON, nil + default: + return FormatText, fmt.Errorf("unknown log format: %s (valid: text, json)", format) + } +} + +// ParseLogOutput converts string to io.Writer +func ParseLogOutput(output string) (io.Writer, error) { + switch strings.ToLower(strings.TrimSpace(output)) { + case "stdout", "": + return os.Stdout, nil + case "stderr": + return os.Stderr, nil + default: + return nil, fmt.Errorf("unknown log output: %s (valid: stdout, stderr)", output) + } +} + +func Debug(ctx context.Context, msg string, args ...any) { + GetLogger(ctx).DebugContext(ctx, msg, args...) +} + +func Info(ctx context.Context, msg string, args ...any) { + GetLogger(ctx).InfoContext(ctx, msg, args...) +} + +func Warn(ctx context.Context, msg string, args ...any) { + GetLogger(ctx).WarnContext(ctx, msg, args...) +} + +func Error(ctx context.Context, msg string, args ...any) { + GetLogger(ctx).ErrorContext(ctx, msg, args...) 
+} + +func Debugf(ctx context.Context, format string, args ...interface{}) { + GetLogger(ctx).DebugContext(ctx, fmt.Sprintf(format, args...)) +} + +func Infof(ctx context.Context, format string, args ...interface{}) { + GetLogger(ctx).InfoContext(ctx, fmt.Sprintf(format, args...)) +} + +func Warnf(ctx context.Context, format string, args ...interface{}) { + GetLogger(ctx).WarnContext(ctx, fmt.Sprintf(format, args...)) +} + +func Errorf(ctx context.Context, format string, args ...interface{}) { + GetLogger(ctx).ErrorContext(ctx, fmt.Sprintf(format, args...)) +} + +// Legacy glog-based logger for backward compatibility +// DEPRECATED: Will be removed in PR 3 type OCMLogger interface { V(level int32) OCMLogger Infof(format string, args ...interface{}) @@ -19,22 +263,22 @@ type OCMLogger interface { Fatal(message string) } -var _ OCMLogger = &logger{} +var _ OCMLogger = &ocmLogger{} type extra map[string]interface{} -type logger struct { +type ocmLogger struct { context context.Context level int32 accountID string - // TODO username is unused, should we be logging it? 
Could be pii - username string - extra extra + username string + extra extra } -// NewOCMLogger creates a new logger instance with a default verbosity of 1 +// NewOCMLogger creates a legacy logger instance +// DEPRECATED: Use slog-based logger instead func NewOCMLogger(ctx context.Context) OCMLogger { - logger := &logger{ + logger := &ocmLogger{ context: ctx, level: 1, extra: make(extra), @@ -43,7 +287,7 @@ func NewOCMLogger(ctx context.Context) OCMLogger { return logger } -func (l *logger) prepareLogPrefix(message string, extra extra) string { +func (l *ocmLogger) prepareLogPrefix(message string, extra extra) string { prefix := " " if txid, ok := l.context.Value("txid").(int64); ok { @@ -66,7 +310,7 @@ func (l *logger) prepareLogPrefix(message string, extra extra) string { return fmt.Sprintf("%s %s %s", prefix, message, strings.Join(args, " ")) } -func (l *logger) prepareLogPrefixf(format string, args ...interface{}) string { +func (l *ocmLogger) prepareLogPrefixf(format string, args ...interface{}) string { orig := fmt.Sprintf(format, args...) prefix := " " @@ -85,8 +329,8 @@ func (l *logger) prepareLogPrefixf(format string, args ...interface{}) string { return fmt.Sprintf("%s%s", prefix, orig) } -func (l *logger) V(level int32) OCMLogger { - return &logger{ +func (l *ocmLogger) V(level int32) OCMLogger { + return &ocmLogger{ context: l.context, accountID: l.accountID, username: l.username, @@ -94,34 +338,33 @@ func (l *logger) V(level int32) OCMLogger { } } -// Infof doesn't trigger Sentry error -func (l *logger) Infof(format string, args ...interface{}) { +func (l *ocmLogger) Infof(format string, args ...interface{}) { prefixed := l.prepareLogPrefixf(format, args...) 
glog.V(glog.Level(l.level)).Infof("%s", prefixed) } -func (l *logger) Extra(key string, value interface{}) OCMLogger { +func (l *ocmLogger) Extra(key string, value interface{}) OCMLogger { l.extra[key] = value return l } -func (l *logger) Info(message string) { +func (l *ocmLogger) Info(message string) { l.log(message, glog.V(glog.Level(l.level)).Infoln) } -func (l *logger) Warning(message string) { +func (l *ocmLogger) Warning(message string) { l.log(message, glog.Warningln) } -func (l *logger) Error(message string) { +func (l *ocmLogger) Error(message string) { l.log(message, glog.Errorln) } -func (l *logger) Fatal(message string) { +func (l *ocmLogger) Fatal(message string) { l.log(message, glog.Fatalln) } -func (l *logger) log(message string, glogFunc func(args ...interface{})) { +func (l *ocmLogger) log(message string, glogFunc func(args ...interface{})) { prefixed := l.prepareLogPrefix(message, l.extra) glogFunc(prefixed) } diff --git a/pkg/logger/logger_test.go b/pkg/logger/logger_test.go new file mode 100644 index 0000000..3b4d741 --- /dev/null +++ b/pkg/logger/logger_test.go @@ -0,0 +1,572 @@ +package logger + +import ( + "bytes" + "context" + "encoding/json" + "log/slog" + "os" + "strings" + "testing" +) + +// TestParseLogLevel tests log level parsing with various inputs +func TestParseLogLevel(t *testing.T) { + tests := []struct { + name string + input string + expected slog.Level + expectErr bool + }{ + {"debug", "debug", slog.LevelDebug, false}, + {"info", "info", slog.LevelInfo, false}, + {"warn", "warn", slog.LevelWarn, false}, + {"warning", "warning", slog.LevelWarn, false}, + {"error", "error", slog.LevelError, false}, + {"case insensitive", "DEBUG", slog.LevelDebug, false}, + {"with whitespace", " info ", slog.LevelInfo, false}, + {"invalid", "invalid", slog.LevelInfo, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + level, err := ParseLogLevel(tt.input) + if tt.expectErr { + if err == nil { + t.Error("expected 
error, got nil") + } + if !strings.Contains(err.Error(), "unknown log level") { + t.Errorf("unexpected error message: %v", err) + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if level != tt.expected { + t.Errorf("expected %v, got %v", tt.expected, level) + } + } + }) + } +} + +// TestParseLogFormat tests log format parsing with various inputs +func TestParseLogFormat(t *testing.T) { + tests := []struct { + name string + input string + expected LogFormat + expectErr bool + }{ + {"text", "text", FormatText, false}, + {"json", "json", FormatJSON, false}, + {"case insensitive", "JSON", FormatJSON, false}, + {"invalid", "invalid", FormatText, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + format, err := ParseLogFormat(tt.input) + if tt.expectErr { + if err == nil { + t.Error("expected error, got nil") + } + if !strings.Contains(err.Error(), "unknown log format") { + t.Errorf("unexpected error message: %v", err) + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if format != tt.expected { + t.Errorf("expected %v, got %v", tt.expected, format) + } + } + }) + } +} + +// TestParseLogOutput tests log output parsing with various inputs +func TestParseLogOutput(t *testing.T) { + tests := []struct { + name string + input string + expected *os.File + expectErr bool + }{ + {"stdout", "stdout", os.Stdout, false}, + {"stderr", "stderr", os.Stderr, false}, + {"empty defaults to stdout", "", os.Stdout, false}, + {"invalid", "invalid", nil, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + output, err := ParseLogOutput(tt.input) + if tt.expectErr { + if err == nil { + t.Error("expected error, got nil") + } + if !strings.Contains(err.Error(), "unknown log output") { + t.Errorf("unexpected error message: %v", err) + } + } else { + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if output != tt.expected { + t.Errorf("expected %v, got %v", 
tt.expected, output) + } + } + }) + } +} + +// TestJSONFormat_BasicLog tests basic JSON format output +func TestJSONFormat_BasicLog(t *testing.T) { + var buf bytes.Buffer + cfg := &LogConfig{ + Level: slog.LevelInfo, + Format: FormatJSON, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } + + InitGlobalLogger(cfg) + ctx := context.Background() + Info(ctx, "test message", "key", "value") + + var logEntry map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &logEntry); err != nil { + t.Fatalf("failed to parse JSON log: %v", err) + } + + // Check required fields + if logEntry["message"] != "test message" { + t.Errorf("expected message 'test message', got %v", logEntry["message"]) + } + if logEntry["level"] != "info" { + t.Errorf("expected level 'info', got %v", logEntry["level"]) + } + if logEntry["component"] != "api" { + t.Errorf("expected component 'api', got %v", logEntry["component"]) + } + if logEntry["version"] != "test-version" { + t.Errorf("expected version 'test-version', got %v", logEntry["version"]) + } + if logEntry["hostname"] != "test-host" { + t.Errorf("expected hostname 'test-host', got %v", logEntry["hostname"]) + } + if logEntry["key"] != "value" { + t.Errorf("expected key 'value', got %v", logEntry["key"]) + } +} + +// TestTextFormat_BasicLog tests basic text format output +func TestTextFormat_BasicLog(t *testing.T) { + var buf bytes.Buffer + cfg := &LogConfig{ + Level: slog.LevelInfo, + Format: FormatText, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } + + InitGlobalLogger(cfg) + ctx := context.Background() + Info(ctx, "test message", "key", "value") + + output := buf.String() + if !strings.Contains(output, "test message") { + t.Errorf("expected output to contain 'test message', got: %s", output) + } + if !strings.Contains(output, "level=info") { + t.Errorf("expected output to contain 'level=info', got: %s", output) + } + if !strings.Contains(output, 
"component=api") { + t.Errorf("expected output to contain 'component=api', got: %s", output) + } +} + +// TestContextFields tests context field extraction for all supported fields +func TestContextFields(t *testing.T) { + tests := []struct { + name string + setupCtx func(context.Context) context.Context + fieldName string + fieldValue string + }{ + { + name: "trace_id", + setupCtx: func(ctx context.Context) context.Context { return WithTraceID(ctx, "trace-123") }, + fieldName: "trace_id", + fieldValue: "trace-123", + }, + { + name: "span_id", + setupCtx: func(ctx context.Context) context.Context { return WithSpanID(ctx, "span-456") }, + fieldName: "span_id", + fieldValue: "span-456", + }, + { + name: "request_id", + setupCtx: func(ctx context.Context) context.Context { + return context.WithValue(ctx, OpIDKey, "op-789") + }, + fieldName: "request_id", + fieldValue: "op-789", + }, + { + name: "cluster_id", + setupCtx: func(ctx context.Context) context.Context { return WithClusterID(ctx, "cluster-abc") }, + fieldName: "cluster_id", + fieldValue: "cluster-abc", + }, + { + name: "resource_type", + setupCtx: func(ctx context.Context) context.Context { return WithResourceType(ctx, "managed-cluster") }, + fieldName: "resource_type", + fieldValue: "managed-cluster", + }, + { + name: "resource_id", + setupCtx: func(ctx context.Context) context.Context { return WithResourceID(ctx, "resource-xyz") }, + fieldName: "resource_id", + fieldValue: "resource-xyz", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + cfg := &LogConfig{ + Level: slog.LevelInfo, + Format: FormatJSON, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } + InitGlobalLogger(cfg) + + ctx := tt.setupCtx(context.Background()) + Info(ctx, "test message") + + var logEntry map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &logEntry); err != nil { + t.Fatalf("failed to parse JSON log: %v", err) + } + + if 
logEntry[tt.fieldName] != tt.fieldValue { + t.Errorf("expected %s='%s', got %v", tt.fieldName, tt.fieldValue, logEntry[tt.fieldName]) + } + }) + } +} + +// TestContextFields_Multiple tests multiple context fields together +func TestContextFields_Multiple(t *testing.T) { + var buf bytes.Buffer + cfg := &LogConfig{ + Level: slog.LevelInfo, + Format: FormatJSON, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } + + InitGlobalLogger(cfg) + ctx := context.Background() + ctx = WithTraceID(ctx, "trace-123") + ctx = WithSpanID(ctx, "span-456") + ctx = WithClusterID(ctx, "cluster-abc") + Info(ctx, "test message") + + var logEntry map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &logEntry); err != nil { + t.Fatalf("failed to parse JSON log: %v", err) + } + + if logEntry["trace_id"] != "trace-123" { + t.Errorf("expected trace_id 'trace-123', got %v", logEntry["trace_id"]) + } + if logEntry["span_id"] != "span-456" { + t.Errorf("expected span_id 'span-456', got %v", logEntry["span_id"]) + } + if logEntry["cluster_id"] != "cluster-abc" { + t.Errorf("expected cluster_id 'cluster-abc', got %v", logEntry["cluster_id"]) + } +} + +// TestStackTrace tests stack trace capture for different log levels +func TestStackTrace(t *testing.T) { + tests := []struct { + name string + level slog.Level + logFunc func(context.Context, string, ...any) + expectStackTrace bool + expectErrorField bool + }{ + {"error level has stack trace", slog.LevelError, Error, true, true}, + {"info level no stack trace", slog.LevelInfo, Info, false, false}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + cfg := &LogConfig{ + Level: tt.level, + Format: FormatJSON, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } + InitGlobalLogger(cfg) + + ctx := context.Background() + tt.logFunc(ctx, "test message") + + var logEntry map[string]interface{} + if err := 
json.Unmarshal(buf.Bytes(), &logEntry); err != nil { + t.Fatalf("failed to parse JSON log: %v", err) + } + + if tt.expectStackTrace { + if logEntry["stack_trace"] == nil { + t.Error("expected stack_trace field for error level") + } + // Verify stack trace is an array + stackTrace, ok := logEntry["stack_trace"].([]interface{}) + if !ok { + t.Fatalf("expected stack_trace to be array, got %T", logEntry["stack_trace"]) + } + if len(stackTrace) == 0 { + t.Error("expected stack_trace to have at least one frame") + } + } else { + if logEntry["stack_trace"] != nil { + t.Error("expected no stack_trace field for non-error level") + } + } + + if tt.expectErrorField { + if logEntry["error"] != "test message" { + t.Errorf("expected error field 'test message', got %v", logEntry["error"]) + } + } else { + if logEntry["error"] != nil { + t.Error("expected no error field for non-error level") + } + } + }) + } +} + +// TestLogLevelFiltering tests log level filtering +func TestLogLevelFiltering(t *testing.T) { + tests := []struct { + name string + configLevel slog.Level + logFunc func(context.Context, string, ...any) + shouldLog bool + }{ + {"debug filtered at info level", slog.LevelInfo, Debug, false}, + {"info enabled at info level", slog.LevelInfo, Info, true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + cfg := &LogConfig{ + Level: tt.configLevel, + Format: FormatJSON, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } + InitGlobalLogger(cfg) + + ctx := context.Background() + tt.logFunc(ctx, "test message") + + if tt.shouldLog && buf.Len() == 0 { + t.Error("expected log output, got none") + } + if !tt.shouldLog && buf.Len() > 0 { + t.Errorf("expected no log output, got: %s", buf.String()) + } + }) + } +} + +// TestGetLogger_Uninitialized tests GetLogger with uninitialized global logger +func TestGetLogger_Uninitialized(t *testing.T) { + // Save and restore global logger + saved := 
globalLogger + defer func() { globalLogger = saved }() + + globalLogger = nil + ctx := context.Background() + logger := GetLogger(ctx) + + if logger == nil { + t.Error("expected GetLogger to return non-nil logger") + } + + // Should return default logger + if logger != slog.Default() { + t.Error("expected GetLogger to return slog.Default() when uninitialized") + } +} + +// TestConvenienceFunctions tests convenience functions (Debug, Info, Warn, Error) +func TestConvenienceFunctions(t *testing.T) { + tests := []struct { + name string + level slog.Level + logFunc func(context.Context, string, ...any) + expectedLvl string + }{ + {"Debug", slog.LevelDebug, Debug, "debug"}, + {"Warn", slog.LevelWarn, Warn, "warn"}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + cfg := &LogConfig{ + Level: tt.level, + Format: FormatJSON, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } + InitGlobalLogger(cfg) + + ctx := context.Background() + tt.logFunc(ctx, "test message", "key", "value") + + var logEntry map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &logEntry); err != nil { + t.Fatalf("failed to parse JSON log: %v", err) + } + + if logEntry["level"] != tt.expectedLvl { + t.Errorf("expected level '%s', got %v", tt.expectedLvl, logEntry["level"]) + } + if logEntry["message"] != "test message" { + t.Errorf("expected message 'test message', got %v", logEntry["message"]) + } + }) + } +} + +// TestFormattedFunctions tests printf-style formatted functions (Debugf, Infof, Warnf, Errorf) +func TestFormattedFunctions(t *testing.T) { + tests := []struct { + name string + level slog.Level + logFunc func(context.Context, string, ...interface{}) + format string + args []interface{} + expectedMsg string + expectedLvl string + expectStackTrace bool + expectErrorField bool + }{ + { + name: "Debugf", + level: slog.LevelDebug, + logFunc: Debugf, + format: "debug message with %s and %d", + args: 
[]interface{}{"string", 123}, + expectedMsg: "debug message with string and 123", + expectedLvl: "debug", + expectStackTrace: false, + expectErrorField: false, + }, + { + name: "Infof", + level: slog.LevelInfo, + logFunc: Infof, + format: "info message: cluster=%s, count=%d", + args: []interface{}{"test-cluster", 42}, + expectedMsg: "info message: cluster=test-cluster, count=42", + expectedLvl: "info", + expectStackTrace: false, + expectErrorField: false, + }, + { + name: "Warnf", + level: slog.LevelWarn, + logFunc: Warnf, + format: "warning: %s failed with code %d", + args: []interface{}{"operation", 500}, + expectedMsg: "warning: operation failed with code 500", + expectedLvl: "warn", + expectStackTrace: false, + expectErrorField: false, + }, + { + name: "Errorf", + level: slog.LevelError, + logFunc: Errorf, + format: "error: failed to process %s: %v", + args: []interface{}{"resource-123", "connection timeout"}, + expectedMsg: "error: failed to process resource-123: connection timeout", + expectedLvl: "error", + expectStackTrace: true, + expectErrorField: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf bytes.Buffer + cfg := &LogConfig{ + Level: tt.level, + Format: FormatJSON, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } + InitGlobalLogger(cfg) + + ctx := context.Background() + tt.logFunc(ctx, tt.format, tt.args...) 
+ + var logEntry map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &logEntry); err != nil { + t.Fatalf("failed to parse JSON log: %v", err) + } + + if logEntry["level"] != tt.expectedLvl { + t.Errorf("expected level '%s', got %v", tt.expectedLvl, logEntry["level"]) + } + if logEntry["message"] != tt.expectedMsg { + t.Errorf("expected message '%s', got %v", tt.expectedMsg, logEntry["message"]) + } + + if tt.expectErrorField { + if logEntry["error"] != tt.expectedMsg { + t.Errorf("expected error field '%s', got %v", tt.expectedMsg, logEntry["error"]) + } + } + + if tt.expectStackTrace { + if logEntry["stack_trace"] == nil { + t.Error("expected stack_trace field for error level") + } + } + }) + } +} diff --git a/pkg/logger/operationid_middleware.go b/pkg/logger/operationid_middleware.go index 497b9ed..ad8b527 100755 --- a/pkg/logger/operationid_middleware.go +++ b/pkg/logger/operationid_middleware.go @@ -1,17 +1,9 @@ package logger import ( - "context" "net/http" - - "github.com/segmentio/ksuid" ) -type OperationIDKey string - -const OpIDKey OperationIDKey = "opID" -const OpIDHeader OperationIDKey = "X-Operation-ID" - // OperationIDMiddleware Middleware wraps the given HTTP handler so that the details of the request are sent to the log. 
func OperationIDMiddleware(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -25,19 +17,3 @@ func OperationIDMiddleware(handler http.Handler) http.Handler { handler.ServeHTTP(w, r.WithContext(ctx)) }) } - -func WithOpID(ctx context.Context) context.Context { - if ctx.Value(OpIDKey) != nil { - return ctx - } - opID := ksuid.New().String() - return context.WithValue(ctx, OpIDKey, opID) -} - -// GetOperationID get operationID of the context -func GetOperationID(ctx context.Context) string { - if opID, ok := ctx.Value(OpIDKey).(string); ok { - return opID - } - return "" -} From 473fa4f673ea3f4e4e3d8ca5c63b815c0d0fb986 Mon Sep 17 00:00:00 2001 From: yasun Date: Wed, 7 Jan 2026 17:42:03 +0800 Subject: [PATCH 2/3] HYPERFLEET-363 | feat(observability): add OpenTelemetry and data masking middleware --- cmd/hyperfleet-api/server/routes.go | 6 + go.mod | 13 +- go.sum | 14 ++ pkg/middleware/masking.go | 112 ++++++++++ pkg/middleware/masking_test.go | 306 ++++++++++++++++++++++++++++ pkg/middleware/otel.go | 45 ++++ pkg/telemetry/otel.go | 65 ++++++ 7 files changed, 555 insertions(+), 6 deletions(-) create mode 100644 pkg/middleware/masking.go create mode 100644 pkg/middleware/masking_test.go create mode 100644 pkg/middleware/otel.go create mode 100644 pkg/telemetry/otel.go diff --git a/cmd/hyperfleet-api/server/routes.go b/cmd/hyperfleet-api/server/routes.go index 3b867ba..f94b907 100755 --- a/cmd/hyperfleet-api/server/routes.go +++ b/cmd/hyperfleet-api/server/routes.go @@ -68,6 +68,12 @@ func (s *apiServer) routes() *mux.Router { // Operation ID middleware sets a relatively unique operation ID in the context of each request for debugging purposes mainRouter.Use(logger.OperationIDMiddleware) + // OpenTelemetry middleware (conditionally enabled) + // Extracts trace_id/span_id from traceparent header and adds to logger context + if env().Config.Logging.OTel.Enabled { + mainRouter.Use(middleware.OTelMiddleware) + } + 
// Request logging middleware logs pertinent information about the request and response mainRouter.Use(logging.RequestLoggingMiddleware) diff --git a/go.mod b/go.mod index 88bdc01..11a40e8 100755 --- a/go.mod +++ b/go.mod @@ -116,17 +116,18 @@ require ( github.com/urfave/negroni v1.0.0 // indirect github.com/woodsbury/decimal128 v1.3.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect - go.opentelemetry.io/otel/metric v1.38.0 // indirect - go.opentelemetry.io/otel/sdk v1.38.0 // indirect - go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/sdk v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect golang.org/x/crypto v0.41.0 // indirect golang.org/x/net v0.43.0 // indirect golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.36.0 // indirect + golang.org/x/sys v0.39.0 // indirect golang.org/x/text v0.29.0 // indirect golang.org/x/time v0.3.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f // indirect diff --git a/go.sum b/go.sum index f143ba0..038f8e3 100755 --- a/go.sum +++ b/go.sum @@ -636,20 +636,32 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/auto/sdk v1.2.1 
h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 h1:8UPA4IbVZxpsD76ihGOQiFml99GPAEZLohDXvqHdi6U= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0/go.mod h1:MZ1T/+51uIVKlRzGw1Fo46KEWThjlCBZKl2LzY5nv4g= go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk 
v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -850,6 +862,8 @@ golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/pkg/middleware/masking.go b/pkg/middleware/masking.go new file mode 100644 index 0000000..a7addf6 --- /dev/null +++ b/pkg/middleware/masking.go @@ -0,0 +1,112 @@ +package middleware + +import ( + "encoding/json" + "net/http" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" +) + +// MaskingMiddleware handles sensitive data masking for logging +// Masks sensitive HTTP 
headers and JSON body fields to prevent PII/secrets leakage +type MaskingMiddleware struct { + enabled bool + sensitiveHeaders []string + sensitiveFields []string +} + +// NewMaskingMiddleware creates a new masking middleware from logging config +func NewMaskingMiddleware(cfg *config.LoggingConfig) *MaskingMiddleware { + return &MaskingMiddleware{ + enabled: cfg.Masking.Enabled, + sensitiveHeaders: cfg.GetSensitiveHeadersList(), + sensitiveFields: cfg.GetSensitiveFieldsList(), + } +} + +// MaskHeaders masks sensitive HTTP headers +// Returns a new header map with sensitive values replaced by "***REDACTED***" +func (m *MaskingMiddleware) MaskHeaders(headers http.Header) http.Header { + if !m.enabled { + return headers + } + + masked := make(http.Header) + for key, values := range headers { + if m.isSensitiveHeader(key) { + masked[key] = []string{"***REDACTED***"} + } else { + masked[key] = values + } + } + return masked +} + +// MaskBody masks sensitive fields in JSON body +// Returns a new JSON byte array with sensitive fields replaced by "***REDACTED***" +// If body is not valid JSON, returns original body unchanged +func (m *MaskingMiddleware) MaskBody(body []byte) []byte { + if !m.enabled || len(body) == 0 { + return body + } + + var data map[string]interface{} + if err := json.Unmarshal(body, &data); err != nil { + // Not valid JSON, return unchanged + return body + } + + m.maskMapRecursive(data) + + masked, err := json.Marshal(data) + if err != nil { + // Failed to marshal, return original + return body + } + return masked +} + +// maskMapRecursive recursively masks sensitive fields in nested maps +func (m *MaskingMiddleware) maskMapRecursive(data map[string]interface{}) { + for key, value := range data { + if m.isSensitiveField(key) { + data[key] = "***REDACTED***" + continue + } + + // Recursively handle nested objects + switch v := value.(type) { + case map[string]interface{}: + m.maskMapRecursive(v) + case []interface{}: + // Handle arrays of objects + for 
_, item := range v { + if nestedMap, ok := item.(map[string]interface{}); ok { + m.maskMapRecursive(nestedMap) + } + } + } + } +} + +// isSensitiveHeader checks if a header name is sensitive (case-insensitive) +func (m *MaskingMiddleware) isSensitiveHeader(header string) bool { + for _, sensitive := range m.sensitiveHeaders { + if strings.EqualFold(header, sensitive) { + return true + } + } + return false +} + +// isSensitiveField checks if a field name contains sensitive keywords (case-insensitive) +func (m *MaskingMiddleware) isSensitiveField(field string) bool { + lower := strings.ToLower(field) + for _, sensitive := range m.sensitiveFields { + if strings.Contains(lower, strings.ToLower(sensitive)) { + return true + } + } + return false +} diff --git a/pkg/middleware/masking_test.go b/pkg/middleware/masking_test.go new file mode 100644 index 0000000..389752c --- /dev/null +++ b/pkg/middleware/masking_test.go @@ -0,0 +1,306 @@ +package middleware + +import ( + "encoding/json" + "net/http" + "testing" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" +) + +func TestMaskHeaders(t *testing.T) { + tests := []struct { + name string + enabled bool + headers http.Header + expected http.Header + }{ + { + name: "mask authorization header", + enabled: true, + headers: http.Header{ + "Authorization": []string{"Bearer token123"}, + "Content-Type": []string{"application/json"}, + }, + expected: http.Header{ + "Authorization": []string{"***REDACTED***"}, + "Content-Type": []string{"application/json"}, + }, + }, + { + name: "mask multiple sensitive headers", + enabled: true, + headers: http.Header{ + "Authorization": []string{"Bearer token123"}, + "X-Api-Key": []string{"secret-key"}, + "Cookie": []string{"session=abc123"}, + "User-Agent": []string{"curl/7.0"}, + }, + expected: http.Header{ + "Authorization": []string{"***REDACTED***"}, + "X-Api-Key": []string{"***REDACTED***"}, + "Cookie": []string{"***REDACTED***"}, + "User-Agent": []string{"curl/7.0"}, + }, + }, 
+ { + name: "case insensitive header matching", + enabled: true, + headers: http.Header{ + "authorization": []string{"Bearer token123"}, + "COOKIE": []string{"session=abc123"}, + }, + expected: http.Header{ + "authorization": []string{"***REDACTED***"}, + "COOKIE": []string{"***REDACTED***"}, + }, + }, + { + name: "masking disabled", + enabled: false, + headers: http.Header{ + "Authorization": []string{"Bearer token123"}, + }, + expected: http.Header{ + "Authorization": []string{"Bearer token123"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &config.LoggingConfig{ + Masking: config.MaskingConfig{ + Enabled: tt.enabled, + SensitiveHeaders: "Authorization,X-API-Key,Cookie", + }, + } + m := NewMaskingMiddleware(cfg) + result := m.MaskHeaders(tt.headers) + + for key, expectedValues := range tt.expected { + if resultValues, ok := result[key]; !ok { + t.Errorf("expected header %s not found in result", key) + } else if len(resultValues) != len(expectedValues) || resultValues[0] != expectedValues[0] { + t.Errorf("header %s = %v, want %v", key, resultValues, expectedValues) + } + } + }) + } +} + +func TestMaskBody(t *testing.T) { + tests := []struct { + name string + enabled bool + body string + expected string + }{ + { + name: "mask password field", + enabled: true, + body: `{"username":"alice","password":"secret123"}`, + expected: `{"password":"***REDACTED***","username":"alice"}`, + }, + { + name: "mask nested sensitive fields", + enabled: true, + body: `{"user":{"name":"alice","password":"secret"},"api_key":"key123"}`, + expected: `{"api_key":"***REDACTED***","user":{"name":"alice","password":"***REDACTED***"}}`, + }, + { + name: "mask array of objects", + enabled: true, + body: `{"users":[{"name":"alice","password":"pass1"},{"name":"bob","secret":"pass2"}]}`, + expected: `{"users":[{"name":"alice","password":"***REDACTED***"},{"name":"bob","secret":"***REDACTED***"}]}`, + }, + { + name: "mask multiple sensitive fields", + 
enabled: true, + body: `{"password":"pass","secret":"sec","token":"tok","api_key":"key","normal":"value"}`, + expected: `{"api_key":"***REDACTED***","normal":"value","password":"***REDACTED***","secret":"***REDACTED***","token":"***REDACTED***"}`, + }, + { + name: "case insensitive field matching", + enabled: true, + body: `{"Password":"pass","SECRET":"sec","AccessToken":"tok"}`, + expected: `{"AccessToken":"***REDACTED***","Password":"***REDACTED***","SECRET":"***REDACTED***"}`, + }, + { + name: "non-JSON body unchanged", + enabled: true, + body: `not json content`, + expected: `not json content`, + }, + { + name: "empty body", + enabled: true, + body: ``, + expected: ``, + }, + { + name: "masking disabled", + enabled: false, + body: `{"password":"secret"}`, + expected: `{"password":"secret"}`, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &config.LoggingConfig{ + Masking: config.MaskingConfig{ + Enabled: tt.enabled, + SensitiveFields: "password,secret,token,api_key,access_token", + }, + } + m := NewMaskingMiddleware(cfg) + result := m.MaskBody([]byte(tt.body)) + + // For JSON, compare as maps to handle key ordering + if tt.body != "" && tt.body[0] == '{' { + var resultMap, expectedMap map[string]interface{} + if err := json.Unmarshal(result, &resultMap); err != nil { + t.Fatalf("failed to unmarshal result: %v", err) + } + if err := json.Unmarshal([]byte(tt.expected), &expectedMap); err != nil { + t.Fatalf("failed to unmarshal expected: %v", err) + } + + // Deep comparison + if !deepEqual(resultMap, expectedMap) { + t.Errorf("MaskBody() = %s, want %s", result, tt.expected) + } + } else { + // For non-JSON, compare as strings + if string(result) != tt.expected { + t.Errorf("MaskBody() = %s, want %s", result, tt.expected) + } + } + }) + } +} + +func TestIsSensitiveHeader(t *testing.T) { + cfg := &config.LoggingConfig{ + Masking: config.MaskingConfig{ + Enabled: true, + SensitiveHeaders: "Authorization,X-API-Key,Cookie", + }, 
+ } + m := NewMaskingMiddleware(cfg) + + tests := []struct { + header string + expected bool + }{ + {"Authorization", true}, + {"authorization", true}, + {"AUTHORIZATION", true}, + {"X-API-Key", true}, + {"x-api-key", true}, + {"Cookie", true}, + {"Content-Type", false}, + {"User-Agent", false}, + } + + for _, tt := range tests { + t.Run(tt.header, func(t *testing.T) { + result := m.isSensitiveHeader(tt.header) + if result != tt.expected { + t.Errorf("isSensitiveHeader(%s) = %v, want %v", tt.header, result, tt.expected) + } + }) + } +} + +func TestIsSensitiveField(t *testing.T) { + cfg := &config.LoggingConfig{ + Masking: config.MaskingConfig{ + Enabled: true, + SensitiveFields: "password,secret,token,api_key", + }, + } + m := NewMaskingMiddleware(cfg) + + tests := []struct { + field string + expected bool + }{ + {"password", true}, + {"Password", true}, + {"user_password", true}, + {"secret", true}, + {"client_secret", true}, + {"token", true}, + {"access_token", true}, + {"api_key", true}, + {"username", false}, + {"email", false}, + {"normal_field", false}, + } + + for _, tt := range tests { + t.Run(tt.field, func(t *testing.T) { + result := m.isSensitiveField(tt.field) + if result != tt.expected { + t.Errorf("isSensitiveField(%s) = %v, want %v", tt.field, result, tt.expected) + } + }) + } +} + +// deepEqual compares two maps recursively +func deepEqual(a, b map[string]interface{}) bool { + if len(a) != len(b) { + return false + } + + for key, aVal := range a { + bVal, ok := b[key] + if !ok { + return false + } + + switch aTyped := aVal.(type) { + case map[string]interface{}: + bTyped, ok := bVal.(map[string]interface{}) + if !ok || !deepEqual(aTyped, bTyped) { + return false + } + case []interface{}: + bTyped, ok := bVal.([]interface{}) + if !ok || !deepEqualSlice(aTyped, bTyped) { + return false + } + default: + if aVal != bVal { + return false + } + } + } + + return true +} + +// deepEqualSlice compares two slices recursively +func deepEqualSlice(a, b 
[]interface{}) bool { + if len(a) != len(b) { + return false + } + + for i := range a { + aMap, aIsMap := a[i].(map[string]interface{}) + bMap, bIsMap := b[i].(map[string]interface{}) + + if aIsMap && bIsMap { + if !deepEqual(aMap, bMap) { + return false + } + } else if a[i] != b[i] { + return false + } + } + + return true +} diff --git a/pkg/middleware/otel.go b/pkg/middleware/otel.go new file mode 100644 index 0000000..93c32f9 --- /dev/null +++ b/pkg/middleware/otel.go @@ -0,0 +1,45 @@ +package middleware + +import ( + "net/http" + + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel/trace" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +// OTelMiddleware wraps HTTP handlers with OpenTelemetry instrumentation +// Automatically: +// - Creates spans for HTTP requests +// - Extracts trace context from traceparent header (W3C Trace Context) +// - Injects trace context into outbound requests +// - Adds trace_id and span_id to logger context +func OTelMiddleware(handler http.Handler) http.Handler { + // Use otelhttp to automatically create spans + otelHandler := otelhttp.NewHandler(handler, "hyperfleet-api", + otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string { + // Custom span name: "METHOD /path" + return r.Method + " " + r.URL.Path + }), + ) + + // Extract trace_id and span_id and add to context + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + span := trace.SpanFromContext(ctx) + + // If span context is valid, extract trace_id and span_id + if span.SpanContext().IsValid() { + traceID := span.SpanContext().TraceID().String() + spanID := span.SpanContext().SpanID().String() + + // Add to logger context + ctx = logger.WithTraceID(ctx, traceID) + ctx = logger.WithSpanID(ctx, spanID) + } + + // Serve with updated context + otelHandler.ServeHTTP(w, r.WithContext(ctx)) + }) +} diff --git a/pkg/telemetry/otel.go b/pkg/telemetry/otel.go 
new file mode 100644 index 0000000..1771fbb --- /dev/null +++ b/pkg/telemetry/otel.go @@ -0,0 +1,65 @@ +package telemetry + +import ( + "context" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" +) + +// InitTraceProvider initializes OpenTelemetry trace provider +// Uses stdout exporter (traces output to logs, no external Collector needed) +// Future upgrade: Switch to OTLP HTTP exporter by changing only the exporter creation +func InitTraceProvider(serviceName, serviceVersion string, samplingRate float64) (*trace.TracerProvider, error) { + // Create stdout exporter + exporter, err := stdouttrace.New( + stdouttrace.WithPrettyPrint(), // Formatted output + ) + if err != nil { + return nil, err + } + + // Create resource (service information) + res, err := resource.New(context.Background(), + resource.WithAttributes( + semconv.ServiceNameKey.String(serviceName), + semconv.ServiceVersionKey.String(serviceVersion), + ), + ) + if err != nil { + return nil, err + } + + // Determine sampler based on sampling rate + var sampler trace.Sampler + if samplingRate >= 1.0 { + sampler = trace.AlwaysSample() // Sample all + } else if samplingRate <= 0.0 { + sampler = trace.NeverSample() // Sample none + } else { + sampler = trace.TraceIDRatioBased(samplingRate) + } + + // Create trace provider + tp := trace.NewTracerProvider( + trace.WithBatcher(exporter), + trace.WithResource(res), + trace.WithSampler(sampler), + ) + + // Set global trace provider + otel.SetTracerProvider(tp) + + return tp, nil +} + +// Shutdown gracefully shuts down the trace provider +func Shutdown(ctx context.Context, tp *trace.TracerProvider) error { + if tp == nil { + return nil + } + return tp.Shutdown(ctx) +} From 3c0994df0a869a9e4210bb9be1063de129387cb3 Mon Sep 17 00:00:00 2001 From: yasun Date: Wed, 7 Jan 2026 20:49:26 +0800 Subject: 
[PATCH 3/3] refactor(logger): migrate from glog to slog with structured logging update update update Replace OCMLogger with logger update Use atomic.Value instead of sync.once,metux update --- README.md | 1 + cmd/hyperfleet-api/environments/framework.go | 40 +- cmd/hyperfleet-api/main.go | 48 +- cmd/hyperfleet-api/migrate/cmd.go | 26 +- cmd/hyperfleet-api/servecmd/cmd.go | 100 +++- cmd/hyperfleet-api/server/api_server.go | 26 +- .../server/healthcheck_server.go | 10 +- .../server/logging/formatter.go | 8 - .../server/logging/formatter_json.go | 62 --- cmd/hyperfleet-api/server/logging/logging.go | 3 - .../logging/request_logging_middleware.go | 81 ++- .../server/logging/responseinfo.go | 10 - cmd/hyperfleet-api/server/logging/writer.go | 54 -- cmd/hyperfleet-api/server/metrics_server.go | 11 +- cmd/hyperfleet-api/server/routes.go | 21 +- cmd/hyperfleet-api/server/server.go | 6 +- docs/logging.md | 509 ++++++++++++++++++ go.mod | 30 +- go.sum | 72 ++- pkg/api/error.go | 12 +- pkg/auth/authz_middleware_mock.go | 4 +- pkg/auth/helpers.go | 5 +- pkg/db/context.go | 14 +- pkg/db/db_session/default.go | 22 +- pkg/db/db_session/test.go | 14 +- pkg/db/db_session/testcontainer.go | 45 +- pkg/db/migrations.go | 15 +- pkg/db/transaction_middleware.go | 11 +- pkg/errors/errors.go | 7 +- pkg/handlers/cluster_status.go | 5 +- pkg/handlers/framework.go | 11 +- pkg/handlers/helpers.go | 22 +- pkg/handlers/metadata.go | 14 +- pkg/handlers/nodepool_status.go | 5 +- pkg/handlers/openapi.go | 24 +- pkg/logger/context.go | 52 +- pkg/logger/http.go | 49 ++ pkg/logger/logger.go | 211 +++----- pkg/logger/logger_test.go | 174 +++--- pkg/logger/ocm_bridge.go | 64 +++ pkg/logger/operationid_middleware.go | 2 +- pkg/middleware/masking.go | 86 ++- pkg/middleware/masking_test.go | 86 ++- pkg/middleware/otel.go | 32 +- pkg/middleware/schema_validation.go | 14 +- pkg/services/cluster.go | 7 +- pkg/services/generic.go | 10 +- pkg/services/node_pool.go | 7 +- pkg/services/status_aggregation.go | 10 
+- pkg/telemetry/otel.go | 2 +- test/helper.go | 67 ++- test/integration/integration_test.go | 13 +- 52 files changed, 1497 insertions(+), 737 deletions(-) delete mode 100755 cmd/hyperfleet-api/server/logging/formatter.go delete mode 100755 cmd/hyperfleet-api/server/logging/formatter_json.go delete mode 100755 cmd/hyperfleet-api/server/logging/logging.go delete mode 100755 cmd/hyperfleet-api/server/logging/responseinfo.go delete mode 100755 cmd/hyperfleet-api/server/logging/writer.go create mode 100644 docs/logging.md create mode 100644 pkg/logger/http.go create mode 100644 pkg/logger/ocm_bridge.go diff --git a/README.md b/README.md index fbe4a6a..585a982 100755 --- a/README.md +++ b/README.md @@ -155,6 +155,7 @@ This project uses [pre-commit](https://pre-commit.io/) for code quality checks. - **[Database](docs/database.md)** - Schema, migrations, and data model - **[Deployment](docs/deployment.md)** - Container images, Kubernetes deployment, and configuration - **[Authentication](docs/authentication.md)** - Development and production auth +- **[Logging](docs/logging.md)** - Structured logging, OpenTelemetry integration, and data masking ### Additional Resources diff --git a/cmd/hyperfleet-api/environments/framework.go b/cmd/hyperfleet-api/environments/framework.go index deaf8f7..514a3c1 100755 --- a/cmd/hyperfleet-api/environments/framework.go +++ b/cmd/hyperfleet-api/environments/framework.go @@ -1,16 +1,17 @@ package environments import ( + "context" "os" "strings" - "github.com/golang/glog" "github.com/spf13/pflag" "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments/registry" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/client/ocm" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) func init() { @@ -64,25 +65,30 @@ func (e *Env) AddFlags(flags *pflag.FlagSet) error { // This should be called 
after the e.Config has been set appropriately though AddFlags and pasing, done elsewhere // The environment does NOT handle flag parsing func (e *Env) Initialize() error { - glog.Infof("Initializing %s environment", e.Name) + ctx := context.Background() + logger.Infof(ctx, "Initializing %s environment", e.Name) envImpl, found := environments[e.Name] if !found { - glog.Fatalf("Unknown runtime environment: %s", e.Name) + logger.Errorf(ctx, "Unknown runtime environment: %s", e.Name) + os.Exit(1) } if err := envImpl.OverrideConfig(e.Config); err != nil { - glog.Fatalf("Failed to configure ApplicationConfig: %s", err) + logger.Errorf(ctx, "Failed to configure ApplicationConfig: %s", err) + os.Exit(1) } messages := environment.Config.ReadFiles() if len(messages) != 0 { - glog.Fatalf("unable to read configuration files:\n%s", strings.Join(messages, "\n")) + logger.Errorf(ctx, "unable to read configuration files:\n%s", strings.Join(messages, "\n")) + os.Exit(1) } // each env will set db explicitly because the DB impl has a `once` init section if err := envImpl.OverrideDatabase(&e.Database); err != nil { - glog.Fatalf("Failed to configure Database: %s", err) + logger.Errorf(ctx, "Failed to configure Database: %s", err) + os.Exit(1) } err := e.LoadClients() @@ -90,12 +96,14 @@ func (e *Env) Initialize() error { return err } if err := envImpl.OverrideClients(&e.Clients); err != nil { - glog.Fatalf("Failed to configure Clients: %s", err) + logger.Errorf(ctx, "Failed to configure Clients: %s", err) + os.Exit(1) } e.LoadServices() if err := envImpl.OverrideServices(&e.Services); err != nil { - glog.Fatalf("Failed to configure Services: %s", err) + logger.Errorf(ctx, "Failed to configure Services: %s", err) + os.Exit(1) } seedErr := e.Seed() @@ -104,7 +112,8 @@ func (e *Env) Initialize() error { } if err := envImpl.OverrideHandlers(&e.Handlers); err != nil { - glog.Fatalf("Failed to configure Handlers: %s", err) + logger.Errorf(ctx, "Failed to configure Handlers: %s", err) + 
os.Exit(1) } return nil @@ -123,6 +132,7 @@ func (e *Env) LoadServices() { } func (e *Env) LoadClients() error { + ctx := context.Background() var err error ocmConfig := ocm.Config{ @@ -136,13 +146,13 @@ func (e *Env) LoadClients() error { // Create OCM Authz client if e.Config.OCM.EnableMock { - glog.Infof("Using Mock OCM Authz Client") + logger.Info(ctx, "Using Mock OCM Authz Client") e.Clients.OCM, err = ocm.NewClientMock(ocmConfig) } else { e.Clients.OCM, err = ocm.NewClient(ocmConfig) } if err != nil { - glog.Errorf("Unable to create OCM Authz client: %s", err.Error()) + logger.Errorf(ctx, "Unable to create OCM Authz client: %s", err) return err } @@ -150,22 +160,24 @@ func (e *Env) LoadClients() error { } func (e *Env) Teardown() { + ctx := context.Background() if e.Database.SessionFactory != nil { if err := e.Database.SessionFactory.Close(); err != nil { - glog.Errorf("Error closing database session factory: %s", err.Error()) + logger.Errorf(ctx, "Error closing database session factory: %s", err) } } if e.Clients.OCM != nil { if err := e.Clients.OCM.Close(); err != nil { - glog.Errorf("Error closing OCM client: %v", err) + logger.Errorf(ctx, "Error closing OCM client: %v", err) } } } func setConfigDefaults(flags *pflag.FlagSet, defaults map[string]string) error { + ctx := context.Background() for name, value := range defaults { if err := flags.Set(name, value); err != nil { - glog.Errorf("Error setting flag %s: %v", name, err) + logger.Error(ctx, "Error setting flag", "flag", name, "error", err) return err } } diff --git a/cmd/hyperfleet-api/main.go b/cmd/hyperfleet-api/main.go index 7048b44..c274437 100755 --- a/cmd/hyperfleet-api/main.go +++ b/cmd/hyperfleet-api/main.go @@ -1,13 +1,16 @@ package main import ( + "context" "flag" + "log/slog" + "os" - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/migrate" "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/servecmd" + 
"github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" // Import plugins to trigger their init() functions // _ "github.com/openshift-hyperfleet/hyperfleet-api/plugins/events" // REMOVED: Events plugin no longer exists @@ -20,18 +23,14 @@ import ( // nolint func main() { - // This is needed to make `glog` believe that the flags have already been parsed, otherwise - // every log messages is prefixed by an error message stating the the flags haven't been - // parsed. - if err := flag.CommandLine.Parse([]string{}); err != nil { - glog.Fatalf("Failed to parse flags: %v", err) - } - - //pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + // Initialize logger first (before any logging occurs) + initDefaultLogger() + ctx := context.Background() - // Always log to stderr by default - if err := flag.Set("logtostderr", "true"); err != nil { - glog.Infof("Unable to set logtostderr to true") + // Parse flags (needed for cobra compatibility) + if err := flag.CommandLine.Parse([]string{}); err != nil { + logger.Error(ctx, "Failed to parse flags", "error", err) + os.Exit(1) } rootCmd := &cobra.Command{ @@ -47,6 +46,29 @@ func main() { rootCmd.AddCommand(migrateCmd, serveCmd) if err := rootCmd.Execute(); err != nil { - glog.Fatalf("error running command: %v", err) + logger.Error(ctx, "Error running command", "error", err) + os.Exit(1) + } +} + +// initDefaultLogger initializes a default logger with INFO level +// This ensures logging works before environment/config is loaded +func initDefaultLogger() { + cfg := &logger.LogConfig{ + Level: slog.LevelInfo, + Format: logger.FormatJSON, + Output: os.Stdout, + Component: "hyperfleet-api", + Version: "unknown", + Hostname: getHostname(), + } + logger.InitGlobalLogger(cfg) +} + +func getHostname() string { + hostname, err := os.Hostname() + if err != nil { + return "unknown" } + return hostname } diff --git a/cmd/hyperfleet-api/migrate/cmd.go b/cmd/hyperfleet-api/migrate/cmd.go index adcbb71..ada3d19 100755 --- 
a/cmd/hyperfleet-api/migrate/cmd.go +++ b/cmd/hyperfleet-api/migrate/cmd.go @@ -3,13 +3,14 @@ package migrate import ( "context" "flag" + "os" - "github.com/golang/glog" - "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/db_session" "github.com/spf13/cobra" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/db_session" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) var dbConfig = config.NewDatabaseConfig() @@ -29,13 +30,28 @@ func NewMigrateCommand() *cobra.Command { } func runMigrate(_ *cobra.Command, _ []string) { + ctx := context.Background() err := dbConfig.ReadFiles() if err != nil { - glog.Fatal(err) + logger.Error(ctx, "Fatal error", "error", err) + os.Exit(1) } connection := db_session.NewProdFactory(dbConfig) - if err := db.Migrate(connection.New(context.Background())); err != nil { - glog.Fatal(err) + defer func() { + if closeErr := connection.Close(); closeErr != nil { + logger.Error(ctx, "Failed to close database connection", "error", closeErr) + } + }() + + if err := db.Migrate(connection.New(ctx)); err != nil { + logger.Error(ctx, "Migration failed", "error", err) + // Close connection before exit to avoid resource leak + if closeErr := connection.Close(); closeErr != nil { + logger.Error(ctx, "Failed to close database connection", "error", closeErr) + } + os.Exit(1) } + + logger.Info(ctx, "Migration completed successfully") } diff --git a/cmd/hyperfleet-api/servecmd/cmd.go b/cmd/hyperfleet-api/servecmd/cmd.go index 62a82c6..de75ce5 100755 --- a/cmd/hyperfleet-api/servecmd/cmd.go +++ b/cmd/hyperfleet-api/servecmd/cmd.go @@ -5,15 +5,17 @@ import ( "log/slog" "os" - "github.com/golang/glog" "github.com/spf13/cobra" "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/server" + 
"github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/telemetry" ) func NewServeCommand() *cobra.Command { + ctx := context.Background() cmd := &cobra.Command{ Use: "serve", Short: "Serve the hyperfleet", @@ -22,31 +24,54 @@ func NewServeCommand() *cobra.Command { } err := environments.Environment().AddFlags(cmd.PersistentFlags()) if err != nil { - glog.Fatalf("Unable to add environment flags to serve command: %s", err.Error()) + logger.Error(ctx, "Unable to add environment flags to serve command", "error", err) + os.Exit(1) } return cmd } func runServe(cmd *cobra.Command, args []string) { + ctx := context.Background() + + // Initialize environment (loads configuration) err := environments.Environment().Initialize() if err != nil { - glog.Fatalf("Unable to initialize environment: %s", err.Error()) + logger.Error(ctx, "Unable to initialize environment", "error", err) + os.Exit(1) } - // Initialize slog logger (demonstration only, full migration in PR 3) - ctx := context.Background() - hostname, _ := os.Hostname() - logConfig := &logger.LogConfig{ - Level: slog.LevelInfo, - Format: logger.FormatJSON, - Output: os.Stdout, - Component: "api", - Version: "dev", - Hostname: hostname, + // Bind environment variables for advanced configuration (OTel, Masking) + environments.Environment().Config.Logging.BindEnv() + + // Initialize slog logger + initLogger() + + // Initialize OpenTelemetry (if enabled) + if environments.Environment().Config.Logging.OTel.Enabled { + samplingRate := environments.Environment().Config.Logging.OTel.SamplingRate + tp, err := telemetry.InitTraceProvider("hyperfleet-api", api.Version, samplingRate) + if err != nil { + logger.Warn(ctx, "Failed to initialize OpenTelemetry", "error", err) + } else { + defer func() { + if err := telemetry.Shutdown(context.Background(), tp); err != nil { + logger.Error(ctx, "Failed to shutdown 
OpenTelemetry", "error", err) + } + }() + logger.Info(ctx, "OpenTelemetry initialized", "sampling_rate", samplingRate) + } + } else { + logger.Info(ctx, "OpenTelemetry disabled", "otel_enabled", false) } - logger.InitGlobalLogger(logConfig) - logger.Info(ctx, "New slog logger initialized (example)", "log_level", "info", "log_format", "json") + + // Log configuration + logger.Info(ctx, "Logger initialized", + "log_level", environments.Environment().Config.Logging.Level, + "log_format", environments.Environment().Config.Logging.Format, + "log_output", environments.Environment().Config.Logging.Output, + "masking_enabled", environments.Environment().Config.Logging.Masking.Enabled, + ) // Run the servers go func() { @@ -66,3 +91,48 @@ func runServe(cmd *cobra.Command, args []string) { select {} } + +// initLogger initializes the global slog logger from configuration +func initLogger() { + ctx := context.Background() + cfg := environments.Environment().Config.Logging + + // Parse log level + level, err := logger.ParseLogLevel(cfg.Level) + if err != nil { + logger.Warn(ctx, "Invalid log level, using default", "level", cfg.Level, "error", err) + level = slog.LevelInfo // Default to info + } + + // Parse log format + format, err := logger.ParseLogFormat(cfg.Format) + if err != nil { + logger.Warn(ctx, "Invalid log format, using default", "format", cfg.Format, "error", err) + format = logger.FormatJSON // Default to JSON + } + + // Parse log output + output, err := logger.ParseLogOutput(cfg.Output) + if err != nil { + logger.Warn(ctx, "Invalid log output, using default", "output", cfg.Output, "error", err) + output = os.Stdout // Default to stdout + } + + // Get hostname + hostname, _ := os.Hostname() + + // Create logger config + logConfig := &logger.LogConfig{ + Level: level, + Format: format, + Output: output, + Component: "api", + Version: api.Version, + Hostname: hostname, + } + + // Reconfigure global logger with environment config + // Use ReconfigureGlobalLogger 
instead of InitGlobalLogger because + // InitGlobalLogger was already called in main() with default config + logger.ReconfigureGlobalLogger(logConfig) +} diff --git a/cmd/hyperfleet-api/server/api_server.go b/cmd/hyperfleet-api/server/api_server.go index 58fce4a..4c173e9 100755 --- a/cmd/hyperfleet-api/server/api_server.go +++ b/cmd/hyperfleet-api/server/api_server.go @@ -5,16 +5,16 @@ import ( "fmt" "net" "net/http" + "os" "time" _ "github.com/auth0/go-jwt-middleware" _ "github.com/golang-jwt/jwt/v4" - "github.com/golang/glog" gorillahandlers "github.com/gorilla/handlers" - sdk "github.com/openshift-online/ocm-sdk-go" "github.com/openshift-online/ocm-sdk-go/authentication" "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) type apiServer struct { @@ -36,14 +36,11 @@ func NewAPIServer() Server { var mainHandler http.Handler = mainRouter if env().Config.Server.EnableJWT { - // Create the logger for the authentication handler: - authnLogger, err := sdk.NewGlogLoggerBuilder(). - InfoV(glog.Level(1)). - DebugV(glog.Level(5)). - Build() - check(err, "Unable to create authentication logger") + // Create the logger for the authentication handler using slog bridge + authnLogger := logger.NewOCMLoggerBridge() // Create the handler that verifies that tokens are valid: + var err error mainHandler, err = authentication.NewHandler(). Logger(authnLogger). KeysFile(env().Config.Server.JwkCertFile). @@ -106,6 +103,7 @@ func NewAPIServer() Server { // Serve start the blocking call to Serve. 
// Useful for breaking up ListenAndServer (Start) when you require the server to be listening before continuing func (s apiServer) Serve(listener net.Listener) { + ctx := context.Background() var err error if env().Config.Server.EnableHTTPS { // Check https cert and key path path @@ -117,16 +115,16 @@ func (s apiServer) Serve(listener net.Listener) { } // Serve with TLS - glog.Infof("Serving with TLS at %s", env().Config.Server.BindAddress) + logger.Info(ctx, "Serving with TLS", "bind_address", env().Config.Server.BindAddress) err = s.httpServer.ServeTLS(listener, env().Config.Server.HTTPSCertFile, env().Config.Server.HTTPSKeyFile) } else { - glog.Infof("Serving without TLS at %s", env().Config.Server.BindAddress) + logger.Info(ctx, "Serving without TLS", "bind_address", env().Config.Server.BindAddress) err = s.httpServer.Serve(listener) } // Web server terminated. check(err, "Web server terminated with errors") - glog.Info("Web server terminated") + logger.Info(ctx, "Web server terminated") } // Listen only start the listener, not the server. @@ -137,9 +135,11 @@ func (s apiServer) Listen() (listener net.Listener, err error) { // Start listening on the configured port and start the server. This is a convenience wrapper for Listen() and Serve(listener Listener) func (s apiServer) Start() { + ctx := context.Background() listener, err := s.Listen() if err != nil { - glog.Fatalf("Unable to start API server: %s", err) + logger.Error(ctx, "Unable to start API server", "error", err) + os.Exit(1) } s.Serve(listener) @@ -147,7 +147,7 @@ func (s apiServer) Start() { // we need to explicitly close Go's sql connection pool. // this needs to be called *exactly* once during an app's lifetime. 
if err := env().Database.SessionFactory.Close(); err != nil { - glog.Errorf("Error closing database connection: %v", err) + logger.Error(ctx, "Error closing database connection", "error", err) } } diff --git a/cmd/hyperfleet-api/server/healthcheck_server.go b/cmd/hyperfleet-api/server/healthcheck_server.go index f677234..0c7d30a 100755 --- a/cmd/hyperfleet-api/server/healthcheck_server.go +++ b/cmd/hyperfleet-api/server/healthcheck_server.go @@ -7,8 +7,9 @@ import ( "net/http" health "github.com/docker/go-healthcheck" - "github.com/golang/glog" "github.com/gorilla/mux" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) var ( @@ -40,6 +41,7 @@ func NewHealthCheckServer() *healthCheckServer { } func (s healthCheckServer) Start() { + ctx := context.Background() var err error if env().Config.HealthCheck.EnableHTTPS { if env().Config.Server.HTTPSCertFile == "" || env().Config.Server.HTTPSKeyFile == "" { @@ -50,14 +52,14 @@ func (s healthCheckServer) Start() { } // Serve with TLS - glog.Infof("Serving HealthCheck with TLS at %s", env().Config.HealthCheck.BindAddress) + logger.Infof(ctx, "Serving HealthCheck with TLS at %s", env().Config.HealthCheck.BindAddress) err = s.httpServer.ListenAndServeTLS(env().Config.Server.HTTPSCertFile, env().Config.Server.HTTPSKeyFile) } else { - glog.Infof("Serving HealthCheck without TLS at %s", env().Config.HealthCheck.BindAddress) + logger.Infof(ctx, "Serving HealthCheck without TLS at %s", env().Config.HealthCheck.BindAddress) err = s.httpServer.ListenAndServe() } check(err, "HealthCheck server terminated with errors") - glog.Infof("HealthCheck server terminated") + logger.Info(ctx, "HealthCheck server terminated") } func (s healthCheckServer) Stop() error { diff --git a/cmd/hyperfleet-api/server/logging/formatter.go b/cmd/hyperfleet-api/server/logging/formatter.go deleted file mode 100755 index 547bcef..0000000 --- a/cmd/hyperfleet-api/server/logging/formatter.go +++ /dev/null @@ -1,8 +0,0 @@ -package logging - -import 
"net/http" - -type LogFormatter interface { - FormatRequestLog(request *http.Request) (string, error) - FormatResponseLog(responseInfo *ResponseInfo) (string, error) -} diff --git a/cmd/hyperfleet-api/server/logging/formatter_json.go b/cmd/hyperfleet-api/server/logging/formatter_json.go deleted file mode 100755 index 9d0062c..0000000 --- a/cmd/hyperfleet-api/server/logging/formatter_json.go +++ /dev/null @@ -1,62 +0,0 @@ -package logging - -import ( - "encoding/json" - "io" - "net/http" - - "github.com/golang/glog" -) - -func NewJSONLogFormatter() *jsonLogFormatter { - return &jsonLogFormatter{} -} - -type jsonLogFormatter struct{} - -var _ LogFormatter = &jsonLogFormatter{} - -func (f *jsonLogFormatter) FormatRequestLog(r *http.Request) (string, error) { - jsonlog := jsonRequestLog{ - Method: r.Method, - RequestURI: r.RequestURI, - RemoteAddr: r.RemoteAddr, - } - if glog.V(10) { - jsonlog.Header = r.Header - jsonlog.Body = r.Body - } - - log, err := json.Marshal(jsonlog) - if err != nil { - return "", err - } - return string(log[:]), nil -} - -func (f *jsonLogFormatter) FormatResponseLog(info *ResponseInfo) (string, error) { - jsonlog := jsonResponseLog{Header: nil, Status: info.Status, Elapsed: info.Elapsed} - if glog.V(10) { - jsonlog.Body = string(info.Body[:]) - } - log, err := json.Marshal(jsonlog) - if err != nil { - return "", err - } - return string(log[:]), nil -} - -type jsonRequestLog struct { - Method string `json:"request_method"` - RequestURI string `json:"request_url"` - Header http.Header `json:"request_header,omitempty"` - Body io.ReadCloser `json:"request_body,omitempty"` - RemoteAddr string `json:"request_remote_ip,omitempty"` -} - -type jsonResponseLog struct { - Header http.Header `json:"response_header,omitempty"` - Status int `json:"response_status,omitempty"` - Body string `json:"response_body,omitempty"` - Elapsed string `json:"elapsed,omitempty"` -} diff --git a/cmd/hyperfleet-api/server/logging/logging.go 
b/cmd/hyperfleet-api/server/logging/logging.go deleted file mode 100755 index 1c889df..0000000 --- a/cmd/hyperfleet-api/server/logging/logging.go +++ /dev/null @@ -1,3 +0,0 @@ -package logging - -const Threshold int32 = 1 diff --git a/cmd/hyperfleet-api/server/logging/request_logging_middleware.go b/cmd/hyperfleet-api/server/logging/request_logging_middleware.go index 9c2f694..1fddaab 100755 --- a/cmd/hyperfleet-api/server/logging/request_logging_middleware.go +++ b/cmd/hyperfleet-api/server/logging/request_logging_middleware.go @@ -1,30 +1,77 @@ package logging import ( + "log/slog" "net/http" "strings" "time" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/middleware" ) -func RequestLoggingMiddleware(handler http.Handler) http.Handler { - return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { - path := strings.TrimSuffix(request.URL.Path, "/") - // these contribute greatly to log spam but are not useful or meaningful. - // consider a list/map of URLs should this grow in the future. 
- doLog := path != "/api/hyperfleet" +// RequestLoggingMiddleware logs HTTP request and response information using slog +// Automatically masks sensitive headers and body fields using the provided masker +func RequestLoggingMiddleware(masker *middleware.MaskingMiddleware) func(http.Handler) http.Handler { + return func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Skip health check endpoint to reduce log spam + if strings.TrimSuffix(r.URL.Path, "/") == "/api/hyperfleet" { + handler.ServeHTTP(w, r) + return + } + + // Mask sensitive headers + maskedHeaders := masker.MaskHeaders(r.Header) - loggingWriter := NewLoggingWriter(writer, request, NewJSONLogFormatter()) + // Log incoming request + logger.Info(ctx, "HTTP request received", + logger.HTTPMethod(r.Method), + logger.HTTPPath(r.URL.Path), + slog.String("remote_addr", r.RemoteAddr), + logger.HTTPUserAgent(r.UserAgent()), + slog.Any("headers", maskedHeaders), + ) - if doLog { - loggingWriter.log(loggingWriter.prepareRequestLog()) - } + // Wrap response writer to capture status code + rw := &responseWriter{ResponseWriter: w} - before := time.Now() - handler.ServeHTTP(loggingWriter, request) - elapsed := time.Since(before).String() + // Execute handler and measure duration + start := time.Now() + handler.ServeHTTP(rw, r) + duration := time.Since(start) + + // Log response + logger.Info(ctx, "HTTP request completed", + logger.HTTPMethod(r.Method), + logger.HTTPPath(r.URL.Path), + logger.HTTPStatusCode(rw.statusCode), + logger.HTTPDuration(duration), + slog.String("remote_addr", r.RemoteAddr), + logger.HTTPUserAgent(r.UserAgent()), + ) + }) + } +} + +// responseWriter wraps http.ResponseWriter to capture status code +type responseWriter struct { + http.ResponseWriter + statusCode int +} + +// WriteHeader captures the status code before writing +func (rw *responseWriter) WriteHeader(code int) { + rw.statusCode = code + 
rw.ResponseWriter.WriteHeader(code) +} - if doLog { - loggingWriter.log(loggingWriter.prepareResponseLog(elapsed)) - } - }) +// Write captures the implicit 200 status code when WriteHeader wasn't called +func (rw *responseWriter) Write(b []byte) (int, error) { + if rw.statusCode == 0 { + rw.statusCode = http.StatusOK + } + return rw.ResponseWriter.Write(b) } diff --git a/cmd/hyperfleet-api/server/logging/responseinfo.go b/cmd/hyperfleet-api/server/logging/responseinfo.go deleted file mode 100755 index 089a120..0000000 --- a/cmd/hyperfleet-api/server/logging/responseinfo.go +++ /dev/null @@ -1,10 +0,0 @@ -package logging - -import "net/http" - -type ResponseInfo struct { - Header http.Header `json:"response_header,omitempty"` - Body []byte `json:"response_body,omitempty"` - Status int `json:"response_status,omitempty"` - Elapsed string `json:"elapsed,omitempty"` -} diff --git a/cmd/hyperfleet-api/server/logging/writer.go b/cmd/hyperfleet-api/server/logging/writer.go deleted file mode 100755 index f134b10..0000000 --- a/cmd/hyperfleet-api/server/logging/writer.go +++ /dev/null @@ -1,54 +0,0 @@ -package logging - -import ( - "net/http" - - "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" -) - -func NewLoggingWriter(w http.ResponseWriter, r *http.Request, f LogFormatter) *loggingWriter { - return &loggingWriter{ResponseWriter: w, request: r, formatter: f} -} - -type loggingWriter struct { - http.ResponseWriter - request *http.Request - formatter LogFormatter - responseStatus int - responseBody []byte -} - -func (writer *loggingWriter) Write(body []byte) (int, error) { - writer.responseBody = body - return writer.ResponseWriter.Write(body) -} - -func (writer *loggingWriter) WriteHeader(status int) { - writer.responseStatus = status - writer.ResponseWriter.WriteHeader(status) -} - -func (writer *loggingWriter) log(logMsg string, err error) { - log := logger.NewOCMLogger(writer.request.Context()) - switch err { - case nil: - log.V(Threshold).Infof(logMsg) - 
default: - log.Extra("error", err.Error()).Error("Unable to format request/response for log.") - } -} - -func (writer *loggingWriter) prepareRequestLog() (string, error) { - return writer.formatter.FormatRequestLog(writer.request) -} - -func (writer *loggingWriter) prepareResponseLog(elapsed string) (string, error) { - info := &ResponseInfo{ - Header: writer.Header(), - Body: writer.responseBody, - Status: writer.responseStatus, - Elapsed: elapsed, - } - - return writer.formatter.FormatResponseLog(info) -} diff --git a/cmd/hyperfleet-api/server/metrics_server.go b/cmd/hyperfleet-api/server/metrics_server.go index 6fa14b0..0c73cd3 100755 --- a/cmd/hyperfleet-api/server/metrics_server.go +++ b/cmd/hyperfleet-api/server/metrics_server.go @@ -45,7 +45,7 @@ func (s metricsServer) Serve(listener net.Listener) { } func (s metricsServer) Start() { - log := logger.NewOCMLogger(context.Background()) + ctx := context.Background() var err error if env().Config.Metrics.EnableHTTPS { if env().Config.Server.HTTPSCertFile == "" || env().Config.Server.HTTPSKeyFile == "" { @@ -55,15 +55,16 @@ func (s metricsServer) Start() { ) } - // Serve with TLS - log.Infof("Serving Metrics with TLS at %s", env().Config.Server.BindAddress) + logger.Info(ctx, "Serving Metrics with TLS", + "bind_address", env().Config.Metrics.BindAddress) err = s.httpServer.ListenAndServeTLS(env().Config.Server.HTTPSCertFile, env().Config.Server.HTTPSKeyFile) } else { - log.Infof("Serving Metrics without TLS at %s", env().Config.Metrics.BindAddress) + logger.Info(ctx, "Serving Metrics without TLS", + "bind_address", env().Config.Metrics.BindAddress) err = s.httpServer.ListenAndServe() } check(err, "Metrics server terminated with errors") - log.Infof("Metrics server terminated") + logger.Info(ctx, "Metrics server terminated") } func (s metricsServer) Stop() error { diff --git a/cmd/hyperfleet-api/server/routes.go b/cmd/hyperfleet-api/server/routes.go index f94b907..0f3c170 100755 --- 
a/cmd/hyperfleet-api/server/routes.go +++ b/cmd/hyperfleet-api/server/routes.go @@ -74,8 +74,11 @@ func (s *apiServer) routes() *mux.Router { mainRouter.Use(middleware.OTelMiddleware) } + // Initialize masking middleware once (reused across all requests) + masker := middleware.NewMaskingMiddleware(env().Config.Logging) + // Request logging middleware logs pertinent information about the request and response - mainRouter.Use(logging.RequestLoggingMiddleware) + mainRouter.Use(logging.RequestLoggingMiddleware(masker)) // /api/hyperfleet apiRouter := mainRouter.PathPrefix("/api/hyperfleet").Subrouter() @@ -113,19 +116,21 @@ func registerApiMiddleware(router *mux.Router) { // Initialize schema validator (non-blocking - will warn if schema not found) // Use background context for initialization logging ctx := context.Background() - log := logger.NewOCMLogger(ctx) schemaValidator, err := validators.NewSchemaValidator(schemaPath) if err != nil { // Log warning but don't fail - schema validation is optional - log.Extra("schema_path", schemaPath).Extra("error", err.Error()).Warning("Failed to load schema validator") - log.Warning("Schema validation is disabled. Spec fields will not be validated.") - log.Info("To enable schema validation:") - log.Info(" - Local: Run from repo root, or set OPENAPI_SCHEMA_PATH=openapi/openapi.yaml") - log.Info(" - Production: Helm sets OPENAPI_SCHEMA_PATH=/etc/hyperfleet/schemas/openapi.yaml") + logger.Warn(ctx, "Failed to load schema validator", + "schema_path", schemaPath, + "error", err.Error()) + logger.Warn(ctx, "Schema validation is disabled. 
Spec fields will not be validated.") + logger.Info(ctx, "To enable schema validation:") + logger.Info(ctx, " - Local: Run from repo root, or set OPENAPI_SCHEMA_PATH=openapi/openapi.yaml") + logger.Info(ctx, " - Production: Helm sets OPENAPI_SCHEMA_PATH=/etc/hyperfleet/schemas/openapi.yaml") } else { // Apply schema validation middleware - log.Extra("schema_path", schemaPath).Info("Schema validation enabled") + logger.Info(ctx, "Schema validation enabled", + "schema_path", schemaPath) router.Use(middleware.SchemaValidationMiddleware(schemaValidator)) } diff --git a/cmd/hyperfleet-api/server/server.go b/cmd/hyperfleet-api/server/server.go index 5d197f7..c8e1949 100755 --- a/cmd/hyperfleet-api/server/server.go +++ b/cmd/hyperfleet-api/server/server.go @@ -1,12 +1,13 @@ package server import ( + "context" "net" "net/http" "os" "strings" - "github.com/golang/glog" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) type Server interface { @@ -25,8 +26,9 @@ func removeTrailingSlash(next http.Handler) http.Handler { // Exit on error func check(err error, msg string) { + ctx := context.Background() if err != nil && err != http.ErrServerClosed { - glog.Errorf("%s: %s", msg, err) + logger.Error(ctx, msg, "error", err) os.Exit(1) } } diff --git a/docs/logging.md b/docs/logging.md new file mode 100644 index 0000000..39964be --- /dev/null +++ b/docs/logging.md @@ -0,0 +1,509 @@ +# HyperFleet API Logging + +This document describes the logging system used in hyperfleet-api. 
+ +## Overview + +HyperFleet API uses Go's standard library `log/slog` for structured logging with the following features: + +- **Structured logging**: All logs use key-value pairs for better queryability +- **Context-aware logging**: Automatic operation_id, trace_id, and span_id propagation +- **Data masking**: Sensitive data redaction in headers and JSON payloads +- **OpenTelemetry integration**: Distributed tracing with configurable sampling +- **JSON and text output**: Machine-parseable JSON or human-readable text format +- **Custom handler**: Automatic component, version, and hostname fields + +## Architecture + +### Components + +1. **pkg/logger/logger.go**: Core logger with HyperFleetHandler (custom slog.Handler) +2. **pkg/logger/context.go**: Context key definitions for trace_id, span_id, cluster_id, etc. +3. **pkg/logger/operationid_middleware.go**: Operation ID generation and middleware +4. **pkg/logger/ocm_bridge.go**: OCMLogger interface for backward compatibility +5. **pkg/middleware/otel.go**: OpenTelemetry trace context extraction +6. 
**pkg/telemetry/otel.go**: OpenTelemetry trace provider initialization + +### Middleware Chain + +```text +HTTP Request + ↓ +OperationIDMiddleware (adds operation_id to context) + ↓ +OTelMiddleware (extracts trace_id and span_id, optional) + ↓ +RequestLoggingMiddleware (logs request/response with masking) + ↓ +Handler (business logic) +``` + +## Configuration + +### Environment Variables + +```bash +# Logging level (debug, info, warn, error) +export LOGGING_LEVEL=info + +# JSON output (true/false) +export LOGGING_JSON=true + +# Enable OpenTelemetry (true/false) +export LOGGING_OTEL_ENABLED=true + +# OpenTelemetry sampling rate (0.0 to 1.0) +# 0.0 = no traces, 1.0 = all traces +export LOGGING_OTEL_SAMPLING_RATE=0.1 + +# Data masking (true/false) +export LOGGING_MASKING_ENABLED=true + +# Headers to mask (comma-separated) +export LOGGING_MASKING_HEADERS="Authorization,Cookie,X-API-Key" + +# JSON body fields to mask (comma-separated) +export LOGGING_MASKING_BODY_FIELDS="password,token,secret,api_key" +``` + +### Configuration Struct + +```go +type LoggingConfig struct { + Level string `env:"LOGGING_LEVEL" envDefault:"info"` + JSON bool `env:"LOGGING_JSON" envDefault:"true"` + OTel OTelConfig `env:",prefix=LOGGING_OTEL_"` + Masking MaskingConfig `env:",prefix=LOGGING_MASKING_"` +} + +type OTelConfig struct { + Enabled bool `env:"ENABLED" envDefault:"true"` + SamplingRate float64 `env:"SAMPLING_RATE" envDefault:"0.1"` +} + +type MaskingConfig struct { + Enabled bool `env:"ENABLED" envDefault:"true"` + Headers []string `env:"HEADERS" envSeparator:","` + BodyFields []string `env:"BODY_FIELDS" envSeparator:","` +} +``` + +## Usage + +### Basic Logging + +Always use context-aware logging to include automatic fields (operation_id, trace_id, span_id): + +```go +import ( + "context" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +func MyHandler(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Info level + logger.Info(ctx, "Processing 
cluster creation", + "cluster_id", clusterID, + "region", region) + + // Error level (automatically includes stack trace) + logger.Error(ctx, "Database connection failed", + "host", "localhost", + "error", err) + + // Debug level + logger.Debug(ctx, "Cache hit", "key", "cluster:123") + + // Warning level + logger.Warn(ctx, "High memory usage", + "used_mb", 1024, + "threshold_mb", 800) +} +``` + +Logs automatically include: +- `component`: "hyperfleet-api" +- `version`: Application version +- `hostname`: Pod/host name +- `operation_id`: Unique request identifier +- `trace_id`: W3C trace ID (when OTel enabled) +- `span_id`: Current span ID (when OTel enabled) + +### Available Functions + +```go +// Standard logging functions +logger.Info(ctx, msg, "key", "value") +logger.Warn(ctx, msg, "key", "value") +logger.Error(ctx, msg, "key", "value") +logger.Debug(ctx, msg, "key", "value") + +// Formatted logging functions (prefer structured logging above) +logger.Infof(ctx, "Processing %d items", count) +logger.Warnf(ctx, "Retry %d/%d", attempt, maxRetries) +logger.Errorf(ctx, "Failed: %v", err) + +// Add context fields +ctx = logger.WithClusterID(ctx, "cluster-123") +ctx = logger.WithResourceType(ctx, "managed-cluster") +ctx = logger.WithResourceID(ctx, "resource-456") +``` + +## Log Output Examples + +This section shows actual log output from HyperFleet API in key scenarios. 
+
+### Example 1: Basic Structured Logging
+
+**Code**:
+```go
+logger.Info(ctx, "Server started", "port", 8080, "environment", "production")
+```
+
+**JSON Output** (`LOGGING_JSON=true`):
+```json
+{
+  "timestamp": "2026-01-07T18:30:45.123Z",
+  "level": "info",
+  "message": "Server started",
+  "component": "hyperfleet-api",
+  "version": "v1.2.3",
+  "hostname": "hyperfleet-api-pod-7f9c8b",
+  "operation_id": "op-2C9zKDz8xQMqF3yH",
+  "port": 8080,
+  "environment": "production"
+}
+```
+
+**Text Output** (`LOGGING_JSON=false`):
+```text
+time=2026-01-07T18:30:45.123Z level=INFO msg="Server started" component=hyperfleet-api version=v1.2.3 hostname=hyperfleet-api-pod-7f9c8b operation_id=op-2C9zKDz8xQMqF3yH port=8080 environment=production
+```
+
+### Example 2: OpenTelemetry Integration
+
+**Configuration**: `LOGGING_OTEL_ENABLED=true`
+
+**Code**:
+```go
+ctx = logger.WithClusterID(ctx, "cluster-abc123")
+logger.Info(ctx, "Processing cluster update", "status", "ready")
+```
+
+**Output** (showing only new fields):
+```json
+{
+  "timestamp": "2026-01-07T18:30:45.456Z",
+  "level": "info",
+  "message": "Processing cluster update",
+  "operation_id": "op-2C9zKDz8xQMqF3yH",
+  "trace_id": "4bf92f3577b34da6a3ce929d0e0e4736",
+  "span_id": "00f067aa0ba902b7",
+  "cluster_id": "cluster-abc123",
+  "status": "ready",
+  "...": "standard fields omitted"
+}
+```
+
+**Key additions**:
+- `trace_id`: W3C trace context for distributed tracing
+- `span_id`: Current operation span identifier
+
+### Example 3: HTTP Request Correlation
+
+**Output** (request and response share same IDs):
+```json
+// Request log
+{
+  "level": "info",
+  "message": "HTTP request received",
+  "operation_id": "op-3D8xLEy9yRNrG4zI",
+  "trace_id": "5cf03f4688c45eb7b4df030e1f1f5847",
+  "span_id": "11f178bb1cb013c8",
+  "method": "POST",
+  "path": "/api/hyperfleet/v1/clusters",
+  "...": "standard fields omitted"
+}
+
+// Response log (same operation_id, trace_id, span_id)
+{
+  "level": "info",
+  "message": "HTTP request completed",
+  "operation_id": "op-3D8xLEy9yRNrG4zI",
+  "trace_id": "5cf03f4688c45eb7b4df030e1f1f5847",
+  "span_id": "11f178bb1cb013c8",
+  "status_code": 201,
+  "duration_ms": 445,
+  "...": "standard fields omitted"
+}
+```
+
+### Example 4: Error Logs with Stack Traces
+
+**Code**:
+```go
+logger.Error(ctx, "Failed to connect to database", "host", "postgres.svc", "error", err)
+```
+
+**Output**:
+```json
+{
+  "level": "error",
+  "message": "Failed to connect to database",
+  "host": "postgres.svc",
+  "error": "dial tcp: lookup postgres.svc: no such host",
+  "stack_trace": [
+    "/workspace/pkg/db/connection.go:45 github.com/openshift-hyperfleet/hyperfleet-api/pkg/db.Connect",
+    "/workspace/pkg/db/factory.go:78 github.com/openshift-hyperfleet/hyperfleet-api/pkg/db.NewSessionFactory",
+    "/workspace/cmd/hyperfleet-api/servecmd/cmd.go:123 main.setupDatabase"
+  ],
+  "...": "standard fields omitted"
+}
+```
+
+**Note**: Error-level logs automatically include stack traces (15 frames max, filtered for relevance).
+ +### Example 5: Context Fields + +**Code**: +```go +ctx = logger.WithClusterID(ctx, "cluster-prod-001") +ctx = logger.WithResourceType(ctx, "managed-cluster") +ctx = logger.WithResourceID(ctx, "mc-abc123") +logger.Info(ctx, "Resource provisioned", "duration_ms", 2500) +``` + +**Output**: +```json +{ + "level": "info", + "message": "Resource provisioned", + "cluster_id": "cluster-prod-001", + "resource_type": "managed-cluster", + "resource_id": "mc-abc123", + "duration_ms": 2500, + "...": "standard fields omitted" +} +``` + +**Available context fields**: `operation_id`, `trace_id`, `span_id`, `cluster_id`, `resource_type`, `resource_id` + +### Example 6: Data Masking + +**Configuration**: `LOGGING_MASKING_ENABLED=true` + +**Request body**: +```json +{"username": "alice@example.com", "password": "SuperSecret123!"} +``` + +**Logged output**: +```json +{ + "level": "info", + "message": "HTTP request received", + "path": "/api/hyperfleet/v1/auth/login", + "headers": { + "Authorization": "***REDACTED***" + }, + "body": { + "username": "alice@example.com", + "password": "***REDACTED***" + }, + "...": "standard fields omitted" +} +``` + +**Security**: Always enable masking in production to prevent credential leakage + +## OpenTelemetry Integration + +### Initialization + +OpenTelemetry is initialized in `cmd/hyperfleet-api/servecmd/cmd.go`: + +```go +if environments.Environment().Config.Logging.OTel.Enabled { + samplingRate := environments.Environment().Config.Logging.OTel.SamplingRate + tp, err := telemetry.InitTraceProvider("hyperfleet-api", api.Version, samplingRate) + if err != nil { + slog.Error("Failed to initialize OpenTelemetry", "error", err) + } else { + defer func() { + if err := tp.Shutdown(context.Background()); err != nil { + slog.Error("Error shutting down tracer provider", "error", err) + } + }() + slog.Info("OpenTelemetry initialized", "sampling_rate", samplingRate) + } +} +``` + +### Trace Propagation + +The OTel middleware automatically: +1. 
Extracts W3C trace context from incoming HTTP headers +2. Creates or continues spans for each request +3. Injects trace_id and span_id into the logger context +4. Exports traces to stdout (can be configured for other exporters) + +### Sampling + +Configure sampling rate to control trace volume: +- `0.0`: No traces (disabled) +- `0.1`: 10% of requests traced +- `1.0`: All requests traced (use in development only) + +## Data Masking + +Sensitive data is automatically masked when `LOGGING_MASKING_ENABLED=true`: + +**Default masked headers**: `Authorization`, `Cookie`, `X-API-Key`, `X-Auth-Token` +**Default masked fields**: `password`, `token`, `secret`, `api_key`, `client_secret` + +To add custom masking rules: + +```go +env().Config.Logging.Masking.Headers = append( + env().Config.Logging.Masking.Headers, + "X-Custom-Auth-Header", +) + +env().Config.Logging.Masking.BodyFields = append( + env().Config.Logging.Masking.BodyFields, + "credit_card", + "ssn", +) +``` + +## Best Practices + +### 1. Use Context-Aware Logging + +✅ **Good**: +```go +logger.Info(ctx, "User logged in", "user_id", userID) +``` + +❌ **Bad**: +```go +slog.Info("User logged in", "user_id", userID) // Missing context +``` + +### 2. Use Key-Value Pairs + +✅ **Good**: +```go +logger.Info(ctx, "Cluster created", + "cluster_id", clusterID, + "region", region, + "node_count", nodeCount) +``` + +❌ **Bad**: +```go +logger.Info(ctx, fmt.Sprintf("Cluster %s created in %s with %d nodes", + clusterID, region, nodeCount)) // String formatting +``` + +### 3. Log Errors with Context + +✅ **Good**: +```go +logger.Error(ctx, "Database query failed", + "query", "SELECT * FROM clusters", + "error", err, + "duration_ms", elapsed.Milliseconds()) +``` + +❌ **Bad**: +```go +logger.Error(ctx, "Error", "error", err) // No context +``` + +### 4. 
Choose Appropriate Log Levels
+
+- **DEBUG**: Detailed diagnostic information (disabled in production)
+- **INFO**: General informational messages (default)
+- **WARN**: Warning messages for unexpected but handled conditions
+- **ERROR**: Error messages for failures
+
+### 5. Avoid Logging Sensitive Data
+
+✅ **Good**:
+```go
+logger.Info(ctx, "User authenticated", "user_id", userID)
+```
+
+❌ **Bad**:
+```go
+logger.Info(ctx, "User authenticated",
+    "user_id", userID,
+    "password_hash", hash) // Don't log even hashed passwords
+```
+
+## Troubleshooting
+
+### Logs Not Appearing
+
+1. Check log level: `export LOGGING_LEVEL=debug`
+2. Verify JSON mode: `export LOGGING_JSON=false` (for human-readable output)
+3. Check context propagation: Ensure middleware chain is correct
+
+### Missing operation_id
+
+Verify `OperationIDMiddleware` is registered before `RequestLoggingMiddleware` (which takes the masking middleware as an argument):
+
+```go
+masker := middleware.NewMaskingMiddleware(env().Config.Logging)
+mainRouter.Use(logger.OperationIDMiddleware)
+mainRouter.Use(middleware.OTelMiddleware)
+mainRouter.Use(logging.RequestLoggingMiddleware(masker))
+```
+
+### Missing trace_id/span_id
+
+1. Check OTel is enabled: `export LOGGING_OTEL_ENABLED=true`
+2. Verify middleware order: `OTelMiddleware` must be after `OperationIDMiddleware`
+3. Check sampling rate: `export LOGGING_OTEL_SAMPLING_RATE=1.0` (for testing)
+
+### Data Not Masked
+
+1. Check masking is enabled: `export LOGGING_MASKING_ENABLED=true`
+2. Verify field names match configuration (case-insensitive)
+3. 
Check JSON structure: Masking only works on top-level fields + +## Testing + +### Unit Tests + +```go +func TestLogging(t *testing.T) { + // Create context with operation ID + ctx := logger.WithOperationID(context.Background(), "test-op-123") + + // Log with context + logger.Info(ctx, "Test message", "key", "value") + + // Verify operation_id is included + // (Use a test handler to capture logs) +} +``` + +### Integration Tests + +```bash +# Run tests with debug logging +LOGGING_LEVEL=debug OCM_ENV=integration_testing go test ./test/integration/... + +# Run tests without OTel +LOGGING_OTEL_ENABLED=false OCM_ENV=integration_testing go test ./... +``` + +## References + +- [slog Documentation](https://pkg.go.dev/log/slog) +- [OpenTelemetry Go SDK](https://opentelemetry.io/docs/languages/go/) +- [W3C Trace Context](https://www.w3.org/TR/trace-context/) +- [HyperFleet Architecture](https://github.com/openshift-hyperfleet/architecture) diff --git a/go.mod b/go.mod index 11a40e8..867dd17 100755 --- a/go.mod +++ b/go.mod @@ -14,7 +14,6 @@ require ( github.com/ghodss/yaml v1.0.0 github.com/go-gormigrate/gormigrate/v2 v2.0.0 github.com/golang-jwt/jwt/v4 v4.5.2 - github.com/golang/glog v1.2.5 github.com/google/uuid v1.6.0 github.com/gorilla/handlers v1.4.2 github.com/gorilla/mux v1.8.0 @@ -24,12 +23,17 @@ require ( github.com/onsi/gomega v1.27.1 github.com/openshift-online/ocm-sdk-go v0.1.334 github.com/prometheus/client_golang v1.16.0 - github.com/segmentio/ksuid v1.0.2 + github.com/segmentio/ksuid v1.0.4 github.com/spf13/cobra v0.0.5 github.com/spf13/pflag v1.0.5 github.com/testcontainers/testcontainers-go v0.33.0 github.com/testcontainers/testcontainers-go/modules/postgres v0.33.0 github.com/yaacov/tree-search-language v0.0.0-20190923184055-1c2dad2e354b + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 + go.opentelemetry.io/otel v1.39.0 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 + go.opentelemetry.io/otel/sdk v1.39.0 + 
go.opentelemetry.io/otel/trace v1.39.0 go.uber.org/mock v0.6.0 gopkg.in/resty.v1 v1.12.0 gorm.io/datatypes v1.2.7 @@ -65,10 +69,10 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-sql-driver/mysql v1.8.1 // indirect + github.com/golang/glog v1.2.5 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/gorilla/css v1.0.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect @@ -117,22 +121,16 @@ require ( github.com/woodsbury/decimal128 v1.3.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.39.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 // indirect go.opentelemetry.io/otel/metric v1.39.0 // indirect - go.opentelemetry.io/otel/sdk v1.39.0 // indirect - go.opentelemetry.io/otel/trace v1.39.0 // indirect - golang.org/x/crypto v0.41.0 // indirect - golang.org/x/net v0.43.0 // indirect - golang.org/x/sync v0.17.0 // indirect + golang.org/x/crypto v0.44.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/sync v0.18.0 // indirect golang.org/x/sys v0.39.0 // indirect - golang.org/x/text v0.29.0 // indirect + golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.3.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20251014184007-4626949a642f // 
indirect - google.golang.org/grpc v1.75.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/protobuf v1.36.10 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 038f8e3..2da6b93 100755 --- a/go.sum +++ b/go.sum @@ -545,15 +545,15 @@ github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+Pymzi github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/segmentio/ksuid v1.0.2 h1:9yBfKyw4ECGTdALaF09Snw3sLJmYIX6AbPJrAy6MrDc= -github.com/segmentio/ksuid v1.0.2/go.mod h1:BXuJDr2byAiHuQaQtSKoXh1J0YmUDurywOXgB2w+OSU= +github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= 
github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -634,40 +634,34 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= -go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ= go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod 
h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0 h1:Ckwye2FpXkYgiHX7fyVrN1uA/UYd9ounqqTuSNAv0k4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.39.0/go.mod h1:teIFJh5pW2y+AN7riv6IBPX2DuesS3HgP39mwOspKwU= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0 h1:8UPA4IbVZxpsD76ihGOQiFml99GPAEZLohDXvqHdi6U= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.39.0/go.mod h1:MZ1T/+51uIVKlRzGw1Fo46KEWThjlCBZKl2LzY5nv4g= -go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= -go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= -go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= -go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= -go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= -go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= 
+go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= -go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= -go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A= +go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= @@ -695,8 +689,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.44.0 
h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU= +golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -774,8 +768,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -794,8 +788,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.17.0 
h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -860,15 +854,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= -golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -877,8 +869,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -997,10 +989,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f h1:OiFuztEyBivVKDvguQJYWq1yDcfAHIID/FVrPR4oiI0= -google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f/go.mod h1:kprOiu9Tr0JYyD6DORrc4Hfyk3RFXqkQ3ctHEum3ZbM= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20251014184007-4626949a642f h1:1FTH6cpXFsENbPR5Bu8NQddPSaUUE6NA2XdZdDSAJK4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251014184007-4626949a642f/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1015,8 +1007,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= -google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= +google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod 
h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/pkg/api/error.go b/pkg/api/error.go index f5d990a..5b2970a 100755 --- a/pkg/api/error.go +++ b/pkg/api/error.go @@ -1,13 +1,14 @@ package api import ( + "context" "encoding/json" "fmt" "net/http" "os" - "github.com/golang/glog" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) // SendNotFound sends a 404 response with some details about the non existing resource. @@ -39,7 +40,7 @@ func SendNotFound(w http.ResponseWriter, r *http.Request) { _, err = w.Write(data) if err != nil { err = fmt.Errorf("can't send response body for request '%s'", r.URL.Path) - glog.Error(err) + logger.Error(r.Context(), "Failed to send response", "error", err) return } } @@ -60,7 +61,7 @@ func SendUnauthorized(w http.ResponseWriter, r *http.Request, message string) { _, err = w.Write(data) if err != nil { err = fmt.Errorf("can't send response body for request '%s'", r.URL.Path) - glog.Error(err) + logger.Error(r.Context(), "Failed to send response", "error", err) return } } @@ -75,7 +76,7 @@ func SendPanic(w http.ResponseWriter, r *http.Request) { r.URL.Path, err.Error(), ) - glog.Error(err) + logger.Error(r.Context(), "Failed to send panic response", "error", err) } } @@ -85,6 +86,7 @@ func SendPanic(w http.ResponseWriter, r *http.Request) { var panicBody []byte func init() { + ctx := context.Background() var err error // Create the panic error body: @@ -105,7 +107,7 @@ func init() { "can't create the panic error body: %s", err.Error(), ) - glog.Error(err) + logger.Error(ctx, "Failed to create panic error body", "error", err) os.Exit(1) } } diff --git a/pkg/auth/authz_middleware_mock.go b/pkg/auth/authz_middleware_mock.go index 9a0a345..4242cbc 100755 --- a/pkg/auth/authz_middleware_mock.go +++ b/pkg/auth/authz_middleware_mock.go @@ -3,7 +3,7 @@ package auth import ( "net/http" - "github.com/golang/glog" + 
"github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) type authzMiddlewareMock struct{} @@ -16,7 +16,7 @@ func NewAuthzMiddlewareMock() AuthorizationMiddleware { func (a authzMiddlewareMock) AuthorizeApi(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - glog.Infof("Mock authz allows / for %q/%q", r.Method, r.URL) + logger.Info(r.Context(), "Mock authz allows / for method/url", "method", r.Method, "url", r.URL.String()) next.ServeHTTP(w, r) }) } diff --git a/pkg/auth/helpers.go b/pkg/auth/helpers.go index 883042a..9be109b 100755 --- a/pkg/auth/helpers.go +++ b/pkg/auth/helpers.go @@ -10,13 +10,12 @@ import ( ) func handleError(ctx context.Context, w http.ResponseWriter, code errors.ServiceErrorCode, reason string) { - log := logger.NewOCMLogger(ctx) operationID := logger.GetOperationID(ctx) err := errors.New(code, "%s", reason) if err.HttpCode >= 400 && err.HttpCode <= 499 { - log.Infof(err.Error()) + logger.Info(ctx, err.Error()) } else { - log.Error(err.Error()) + logger.Error(ctx, err.Error()) } writeJSONResponse(w, err.HttpCode, err.AsOpenapiError(operationID)) diff --git a/pkg/db/context.go b/pkg/db/context.go index a0d0ac6..d77bb73 100755 --- a/pkg/db/context.go +++ b/pkg/db/context.go @@ -22,23 +22,22 @@ func NewContext(ctx context.Context, connection SessionFactory) (context.Context // Resolve resolves the current transaction according to the rollback flag. 
func Resolve(ctx context.Context) { - log := logger.NewOCMLogger(ctx) tx, ok := dbContext.Transaction(ctx) if !ok { - log.Error("Could not retrieve transaction from context") + logger.Error(ctx, "Could not retrieve transaction from context") return } if tx.MarkedForRollback() { if err := tx.Rollback(); err != nil { - log.Extra("error", err.Error()).Error("Could not rollback transaction") + logger.Error(ctx, "Could not rollback transaction", "error", err.Error()) return } - log.Infof("Rolled back transaction") + logger.Info(ctx, "Rolled back transaction") } else { if err := tx.Commit(); err != nil { // TODO: what does the user see when this occurs? seems like they will get a false positive - log.Extra("error", err.Error()).Error("Could not commit transaction") + logger.Error(ctx, "Could not commit transaction", "error", err.Error()) return } } @@ -46,12 +45,11 @@ func Resolve(ctx context.Context) { // MarkForRollback flags the transaction stored in the context for rollback and logs whatever error caused the rollback func MarkForRollback(ctx context.Context, err error) { - log := logger.NewOCMLogger(ctx) transaction, ok := dbContext.Transaction(ctx) if !ok { - log.Error("failed to mark transaction for rollback: could not retrieve transaction from context") + logger.Error(ctx, "failed to mark transaction for rollback: could not retrieve transaction from context") return } transaction.SetRollbackFlag(true) - log.Infof("Marked transaction for rollback, err: %v", err) + logger.Info(ctx, "Marked transaction for rollback", "error", err) } diff --git a/pkg/db/db_session/default.go b/pkg/db/db_session/default.go index 5be907c..a08017b 100755 --- a/pkg/db/db_session/default.go +++ b/pkg/db/db_session/default.go @@ -8,13 +8,13 @@ import ( "gorm.io/driver/postgres" "gorm.io/gorm" - "gorm.io/gorm/logger" + gormlogger "gorm.io/gorm/logger" "github.com/lib/pq" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" - 
ocmlogger "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) type Default struct { @@ -98,18 +98,20 @@ func (f *Default) DirectDB() *sql.DB { } func waitForNotification(l *pq.Listener, callback func(id string)) { - logger := ocmlogger.NewOCMLogger(context.Background()) + ctx := context.Background() for { select { case n := <-l.Notify: - logger.Infof("Received data from channel [%s] : %s", n.Channel, n.Extra) + logger.Info(ctx, "Received data from channel", + "channel", n.Channel, + "data", n.Extra) callback(n.Extra) return case <-time.After(10 * time.Second): - logger.V(10).Infof("Received no events on channel during interval. Pinging source") + logger.Debug(ctx, "Received no events on channel during interval. Pinging source") go func() { if err := l.Ping(); err != nil { - logger.V(10).Infof("Ping failed: %v", err) + logger.Debug(ctx, "Ping failed", "error", err) } }() return @@ -118,11 +120,9 @@ func waitForNotification(l *pq.Listener, callback func(id string)) { } func newListener(ctx context.Context, connstr, channel string, callback func(id string)) { - logger := ocmlogger.NewOCMLogger(ctx) - plog := func(ev pq.ListenerEventType, err error) { if err != nil { - logger.Error(err.Error()) + logger.Error(ctx, err.Error()) } } listener := pq.NewListener(connstr, 10*time.Second, time.Minute, plog) @@ -131,7 +131,7 @@ func newListener(ctx context.Context, connstr, channel string, callback func(id panic(err) } - logger.Infof("Starting channeling monitor for %s", channel) + logger.Info(ctx, "Starting channeling monitor", "channel", channel) for { waitForNotification(listener, callback) } @@ -144,7 +144,7 @@ func (f *Default) NewListener(ctx context.Context, channel string, callback func func (f *Default) New(ctx context.Context) *gorm.DB { conn := f.g2.Session(&gorm.Session{ Context: ctx, - Logger: f.g2.Logger.LogMode(logger.Silent), + Logger: f.g2.Logger.LogMode(gormlogger.Silent), }) if 
f.config.Debug { conn = conn.Debug() diff --git a/pkg/db/db_session/test.go b/pkg/db/db_session/test.go index 1262e9f..96f2a0d 100755 --- a/pkg/db/db_session/test.go +++ b/pkg/db/db_session/test.go @@ -6,15 +6,14 @@ import ( "fmt" "time" - "github.com/golang/glog" "github.com/lib/pq" - "gorm.io/driver/postgres" "gorm.io/gorm" - "gorm.io/gorm/logger" + gormlogger "gorm.io/gorm/logger" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) type Test struct { @@ -50,13 +49,14 @@ func NewTestFactory(config *config.DatabaseConfig) *Test { func (f *Test) Init(config *config.DatabaseConfig) { // Only the first time once.Do(func() { + ctx := context.Background() if err := initDatabase(config, db.Migrate); err != nil { - glog.Errorf("error initializing test database: %s", err) + logger.Errorf(ctx, "error initializing test database: %s", err) return } if err := resetDB(config); err != nil { - glog.Errorf("error resetting test database: %s", err) + logger.Errorf(ctx, "error resetting test database: %s", err) return } }) @@ -131,7 +131,7 @@ func connect(name string, config *config.DatabaseConfig) (*sql.DB, *gorm.DB, fun PrepareStmt: false, FullSaveAssociations: false, SkipDefaultTransaction: true, - Logger: logger.Default.LogMode(logger.Silent), + Logger: gormlogger.Default.LogMode(gormlogger.Silent), } g2, err = gorm.Open(postgres.New(postgres.Config{ Conn: dbx, @@ -195,7 +195,7 @@ func (f *Test) New(ctx context.Context) *gorm.DB { conn := f.g2.Session(&gorm.Session{ Context: ctx, - Logger: f.g2.Logger.LogMode(logger.Silent), + Logger: f.g2.Logger.LogMode(gormlogger.Silent), }) if f.config.Debug { conn = conn.Debug() diff --git a/pkg/db/db_session/testcontainer.go b/pkg/db/db_session/testcontainer.go index 9c288cb..8d7c0d5 100755 --- a/pkg/db/db_session/testcontainer.go +++ b/pkg/db/db_session/testcontainer.go @@ -4,19 +4,19 @@ import ( "context" 
"database/sql" "fmt" + "os" "time" - "github.com/golang/glog" "github.com/testcontainers/testcontainers-go" "github.com/testcontainers/testcontainers-go/modules/postgres" "github.com/testcontainers/testcontainers-go/wait" - gormpostgres "gorm.io/driver/postgres" "gorm.io/gorm" - "gorm.io/gorm/logger" + gormlogger "gorm.io/gorm/logger" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) type Testcontainer struct { @@ -41,7 +41,7 @@ func NewTestcontainerFactory(config *config.DatabaseConfig) *Testcontainer { func (f *Testcontainer) Init(config *config.DatabaseConfig) { ctx := context.Background() - glog.Infof("Starting PostgreSQL testcontainer...") + logger.Info(ctx, "Starting PostgreSQL testcontainer...") // Create PostgreSQL container container, err := postgres.Run(ctx, @@ -54,7 +54,8 @@ func (f *Testcontainer) Init(config *config.DatabaseConfig) { WithStartupTimeout(60*time.Second)), ) if err != nil { - glog.Fatalf("Failed to start PostgreSQL testcontainer: %s", err) + logger.Errorf(ctx, "Failed to start PostgreSQL testcontainer: %s", err) + os.Exit(1) } f.container = container @@ -62,15 +63,17 @@ func (f *Testcontainer) Init(config *config.DatabaseConfig) { // Get connection string from container connStr, err := container.ConnectionString(ctx, "sslmode=disable") if err != nil { - glog.Fatalf("Failed to get connection string from testcontainer: %s", err) + logger.Errorf(ctx, "Failed to get connection string from testcontainer: %s", err) + os.Exit(1) } - glog.Infof("PostgreSQL testcontainer started at: %s", connStr) + logger.Infof(ctx, "PostgreSQL testcontainer started at: %s", connStr) // Open SQL connection f.sqlDB, err = sql.Open("postgres", connStr) if err != nil { - glog.Fatalf("Failed to connect to testcontainer database: %s", err) + logger.Errorf(ctx, "Failed to connect to testcontainer database: %s", err) + os.Exit(1) } // 
Configure connection pool @@ -81,11 +84,11 @@ func (f *Testcontainer) Init(config *config.DatabaseConfig) { PrepareStmt: false, FullSaveAssociations: false, SkipDefaultTransaction: true, - Logger: logger.Default.LogMode(logger.Silent), + Logger: gormlogger.Default.LogMode(gormlogger.Silent), } if config.Debug { - conf.Logger = logger.Default.LogMode(logger.Info) + conf.Logger = gormlogger.Default.LogMode(gormlogger.Info) } f.g2, err = gorm.Open(gormpostgres.New(gormpostgres.Config{ @@ -93,16 +96,18 @@ func (f *Testcontainer) Init(config *config.DatabaseConfig) { PreferSimpleProtocol: true, }), conf) if err != nil { - glog.Fatalf("Failed to connect GORM to testcontainer database: %s", err) + logger.Errorf(ctx, "Failed to connect GORM to testcontainer database: %s", err) + os.Exit(1) } // Run migrations - glog.Infof("Running database migrations on testcontainer...") + logger.Info(ctx, "Running database migrations on testcontainer...") if err := db.Migrate(f.g2); err != nil { - glog.Fatalf("Failed to run migrations on testcontainer: %s", err) + logger.Errorf(ctx, "Failed to run migrations on testcontainer: %s", err) + os.Exit(1) } - glog.Infof("Testcontainer database initialized successfully") + logger.Info(ctx, "Testcontainer database initialized successfully") } func (f *Testcontainer) DirectDB() *sql.DB { @@ -112,7 +117,7 @@ func (f *Testcontainer) DirectDB() *sql.DB { func (f *Testcontainer) New(ctx context.Context) *gorm.DB { conn := f.g2.Session(&gorm.Session{ Context: ctx, - Logger: f.g2.Logger.LogMode(logger.Silent), + Logger: f.g2.Logger.LogMode(gormlogger.Silent), }) if f.config.Debug { conn = conn.Debug() @@ -131,17 +136,17 @@ func (f *Testcontainer) Close() error { // Close SQL connection if f.sqlDB != nil { if err := f.sqlDB.Close(); err != nil { - glog.Errorf("Error closing SQL connection: %s", err) + logger.Errorf(ctx, "Error closing SQL connection: %s", err) } } // Terminate container if f.container != nil { - glog.Infof("Stopping PostgreSQL 
testcontainer...") + logger.Info(ctx, "Stopping PostgreSQL testcontainer...") if err := f.container.Terminate(ctx); err != nil { return fmt.Errorf("failed to terminate testcontainer: %s", err) } - glog.Infof("PostgreSQL testcontainer stopped") + logger.Info(ctx, "PostgreSQL testcontainer stopped") } return nil @@ -163,7 +168,7 @@ func (f *Testcontainer) ResetDB() { for _, table := range tables { if g2.Migrator().HasTable(table) { if err := g2.Exec(fmt.Sprintf("TRUNCATE TABLE %s CASCADE", table)).Error; err != nil { - glog.Errorf("Error truncating table %s: %s", table, err) + logger.Error(ctx, "Error truncating table", "table", table, "error", err) } } } @@ -173,7 +178,7 @@ func (f *Testcontainer) NewListener(ctx context.Context, channel string, callbac // Get the connection string for the listener connStr, err := f.container.ConnectionString(ctx, "sslmode=disable") if err != nil { - glog.Errorf("Failed to get connection string for listener: %s", err) + logger.Errorf(ctx, "Failed to get connection string for listener: %s", err) return } diff --git a/pkg/db/migrations.go b/pkg/db/migrations.go index e65e7af..f31e343 100755 --- a/pkg/db/migrations.go +++ b/pkg/db/migrations.go @@ -2,12 +2,13 @@ package db import ( "context" + "os" "github.com/go-gormigrate/gormigrate/v2" - "github.com/golang/glog" - "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/migrations" - "gorm.io/gorm" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/migrations" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) // gormigrate is a wrapper for gorm's migration functions that adds schema versioning and rollback capabilities. 
@@ -26,11 +27,15 @@ func Migrate(g2 *gorm.DB) error { // schema based on the most recent migration // This should be for testing purposes mainly func MigrateTo(sessionFactory SessionFactory, migrationID string) { - g2 := sessionFactory.New(context.Background()) + ctx := context.Background() + g2 := sessionFactory.New(ctx) m := newGormigrate(g2) if err := m.MigrateTo(migrationID); err != nil { - glog.Fatalf("Could not migrate: %v", err) + logger.Error(ctx, "Could not migrate", + "migration_id", migrationID, + "error", err.Error()) + os.Exit(1) } } diff --git a/pkg/db/transaction_middleware.go b/pkg/db/transaction_middleware.go index 9951d27..ae0a7b7 100755 --- a/pkg/db/transaction_middleware.go +++ b/pkg/db/transaction_middleware.go @@ -12,26 +12,19 @@ import ( // and stores it in the request context. func TransactionMiddleware(next http.Handler, connection SessionFactory) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Create a new Context with the transaction stored in it. ctx, err := NewContext(r.Context(), connection) - log := logger.NewOCMLogger(ctx) if err != nil { - log.Extra("error", err.Error()).Error("Could not create transaction") + logger.Error(r.Context(), "Could not create transaction", "error", err.Error()) // use default error to avoid exposing internals to users err := errors.GeneralError("") - operationID := logger.GetOperationID(ctx) + operationID := logger.GetOperationID(r.Context()) writeJSONResponse(w, err.HttpCode, err.AsOpenapiError(operationID)) return } - // Set the value of the request pointer to the value of a new copy of the request with the new context key,vale - // stored in it *r = *r.WithContext(ctx) - - // Returned from handlers and resolve transactions. defer func() { Resolve(r.Context()) }() - // Continue handling requests. 
next.ServeHTTP(w, r) }) } diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go index d29fa7a..9685103 100755 --- a/pkg/errors/errors.go +++ b/pkg/errors/errors.go @@ -1,13 +1,13 @@ package errors import ( + "context" "fmt" "net/http" "strconv" - "github.com/golang/glog" - "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) const ( @@ -114,7 +114,8 @@ func New(code ServiceErrorCode, reason string, values ...interface{}) *ServiceEr var err *ServiceError exists, err := Find(code) if !exists { - glog.Errorf("Undefined error code used: %d", code) + ctx := context.Background() + logger.Errorf(ctx, "Undefined error code used: %d", code) err = &ServiceError{ Code: ErrorGeneral, Reason: "Unspecified error", diff --git a/pkg/handlers/cluster_status.go b/pkg/handlers/cluster_status.go index c202299..0cb1d4f 100644 --- a/pkg/handlers/cluster_status.go +++ b/pkg/handlers/cluster_status.go @@ -98,8 +98,9 @@ func (h clusterStatusHandler) Create(w http.ResponseWriter, r *http.Request) { // Trigger status aggregation if _, aggregateErr := h.clusterService.UpdateClusterStatusFromAdapters(ctx, clusterID); aggregateErr != nil { // Log error but don't fail the request - the status will be computed on next update - log := logger.NewOCMLogger(ctx) - log.Extra("cluster_id", clusterID).Extra("error", aggregateErr).Warning("Failed to aggregate cluster status") + logger.Warn(ctx, "Failed to aggregate cluster status", + "cluster_id", clusterID, + "error", aggregateErr) } status, presErr := presenters.PresentAdapterStatus(adapterStatus) diff --git a/pkg/handlers/framework.go b/pkg/handlers/framework.go index 389fe83..79a46fb 100755 --- a/pkg/handlers/framework.go +++ b/pkg/handlers/framework.go @@ -29,13 +29,18 @@ type errorHandlerFunc func(r *http.Request, w http.ResponseWriter, err *errors.S type httpAction func() (interface{}, *errors.ServiceError) func handleError(r *http.Request, w http.ResponseWriter, err 
*errors.ServiceError) { - log := logger.NewOCMLogger(r.Context()) operationID := logger.GetOperationID(r.Context()) // If this is a 400 error, its the user's issue, log as info rather than error if err.HttpCode >= 400 && err.HttpCode <= 499 { - log.Infof(err.Error()) + logger.Info(r.Context(), "Client error response", + "code", err.Code, + "http_code", err.HttpCode, + "reason", err.Reason) } else { - log.Error(err.Error()) + logger.Error(r.Context(), "Server error response", + "code", err.Code, + "http_code", err.HttpCode, + "reason", err.Reason) } writeJSONResponse(w, r, err.HttpCode, err.AsOpenapiError(operationID)) } diff --git a/pkg/handlers/helpers.go b/pkg/handlers/helpers.go index 1d0bcf3..438ad79 100755 --- a/pkg/handlers/helpers.go +++ b/pkg/handlers/helpers.go @@ -19,22 +19,20 @@ func writeJSONResponse(w http.ResponseWriter, r *http.Request, code int, payload response, err := json.Marshal(payload) if err != nil { // Headers already sent, can't change status code - log := logger.NewOCMLogger(r.Context()) - log.Extra("endpoint", r.URL.Path). - Extra("method", r.Method). - Extra("status_code", code). - Extra("error", err.Error()). - Error("Failed to marshal JSON response payload") + logger.Error(r.Context(), "Failed to marshal JSON response payload", + "endpoint", r.URL.Path, + "method", r.Method, + "status_code", code, + "error", err.Error()) return } if _, err := w.Write(response); err != nil { // Writing failed, nothing we can do at this point - log := logger.NewOCMLogger(r.Context()) - log.Extra("endpoint", r.URL.Path). - Extra("method", r.Method). - Extra("status_code", code). - Extra("error", err.Error()). 
- Error("Failed to write JSON response body") + logger.Error(r.Context(), "Failed to write JSON response body", + "endpoint", r.URL.Path, + "method", r.Method, + "status_code", code, + "error", err.Error()) return } } diff --git a/pkg/handlers/metadata.go b/pkg/handlers/metadata.go index 0f45047..7a2654a 100755 --- a/pkg/handlers/metadata.go +++ b/pkg/handlers/metadata.go @@ -45,9 +45,10 @@ func (h metadataHandler) Get(w http.ResponseWriter, r *http.Request) { } data, err := json.Marshal(body) if err != nil { - log := logger.NewOCMLogger(r.Context()) - log.Extra("endpoint", r.URL.Path).Extra("method", r.Method).Extra("error", err.Error()). - Error("Failed to marshal metadata response") + logger.Error(r.Context(), "Failed to marshal metadata response", + "endpoint", r.URL.Path, + "method", r.Method, + "error", err.Error()) api.SendPanic(w, r) return } @@ -55,9 +56,10 @@ func (h metadataHandler) Get(w http.ResponseWriter, r *http.Request) { // Send the response: _, err = w.Write(data) if err != nil { - log := logger.NewOCMLogger(r.Context()) - log.Extra("endpoint", r.URL.Path).Extra("method", r.Method).Extra("error", err.Error()). 
- Error("Failed to send metadata response body") + logger.Error(r.Context(), "Failed to send metadata response body", + "endpoint", r.URL.Path, + "method", r.Method, + "error", err.Error()) return } } diff --git a/pkg/handlers/nodepool_status.go b/pkg/handlers/nodepool_status.go index ce79d40..2f26427 100644 --- a/pkg/handlers/nodepool_status.go +++ b/pkg/handlers/nodepool_status.go @@ -98,8 +98,9 @@ func (h nodePoolStatusHandler) Create(w http.ResponseWriter, r *http.Request) { // Trigger status aggregation if _, aggregateErr := h.nodePoolService.UpdateNodePoolStatusFromAdapters(ctx, nodePoolID); aggregateErr != nil { // Log error but don't fail the request - the status will be computed on next update - log := logger.NewOCMLogger(ctx) - log.Extra("nodepool_id", nodePoolID).Extra("error", aggregateErr).Warning("Failed to aggregate nodepool status") + logger.Warn(ctx, "Failed to aggregate nodepool status", + "nodepool_id", nodePoolID, + "error", aggregateErr) } status, presErr := presenters.PresentAdapterStatus(adapterStatus) diff --git a/pkg/handlers/openapi.go b/pkg/handlers/openapi.go index 5fee317..f6812d4 100755 --- a/pkg/handlers/openapi.go +++ b/pkg/handlers/openapi.go @@ -1,12 +1,13 @@ package handlers import ( + "context" "embed" "io/fs" "net/http" "github.com/ghodss/yaml" - "github.com/golang/glog" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" @@ -21,6 +22,7 @@ type openAPIHandler struct { } func NewOpenAPIHandler() (*openAPIHandler, error) { + ctx := context.Background() // Load the fully resolved OpenAPI spec from embedded filesystem resolvedData, err := api.GetOpenAPISpec() if err != nil { @@ -38,7 +40,7 @@ func NewOpenAPIHandler() (*openAPIHandler, error) { err, ) } - glog.Info("Loaded fully resolved OpenAPI specification from embedded pkg/api/openapi/api/openapi.yaml") + logger.Info(ctx, "Loaded fully resolved OpenAPI 
specification from embedded pkg/api/openapi/api/openapi.yaml") // Load the OpenAPI UI HTML content uiContent, err := fs.ReadFile(openapiui, "openapi-ui.html") @@ -48,7 +50,7 @@ func NewOpenAPIHandler() (*openAPIHandler, error) { err, ) } - glog.Info("Loaded OpenAPI UI HTML from embedded file") + logger.Info(ctx, "Loaded OpenAPI UI HTML from embedded file") return &openAPIHandler{ openAPIDefinitions: data, @@ -61,9 +63,11 @@ func (h *openAPIHandler) GetOpenAPI(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) if _, err := w.Write(h.openAPIDefinitions); err != nil { // Response already committed, can't report error - log := logger.NewOCMLogger(r.Context()) - log.Extra("endpoint", r.URL.Path).Extra("method", r.Method).Extra("status_code", http.StatusOK). - Extra("error", err.Error()).Error("Failed to write OpenAPI specification response") + logger.Error(r.Context(), "Failed to write OpenAPI specification response", + "endpoint", r.URL.Path, + "method", r.Method, + "status_code", http.StatusOK, + "error", err.Error()) return } } @@ -73,9 +77,11 @@ func (h *openAPIHandler) GetOpenAPIUI(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) if _, err := w.Write(h.uiContent); err != nil { // Response already committed, can't report error - log := logger.NewOCMLogger(r.Context()) - log.Extra("endpoint", r.URL.Path).Extra("method", r.Method).Extra("status_code", http.StatusOK). 
- Extra("error", err.Error()).Error("Failed to write OpenAPI UI response") + logger.Error(r.Context(), "Failed to write OpenAPI UI response", + "endpoint", r.URL.Path, + "method", r.Method, + "status_code", http.StatusOK, + "error", err.Error()) return } } diff --git a/pkg/logger/context.go b/pkg/logger/context.go index 6e72082..e4d7da7 100644 --- a/pkg/logger/context.go +++ b/pkg/logger/context.go @@ -6,23 +6,25 @@ import ( "github.com/segmentio/ksuid" ) -// Context key types for type safety -type OperationIDKey string -type TraceIDKey string -type SpanIDKey string -type ClusterIDKey string -type ResourceTypeKey string -type ResourceIDKey string +// contextKey is an unexported type for keys defined in this package. +// This prevents collisions with keys defined in other packages. +type contextKey string // Context keys for storing correlation fields const ( - OpIDKey OperationIDKey = "opID" // Keep existing value for compatibility - OpIDHeader OperationIDKey = "X-Operation-ID" // HTTP header for operation ID - TraceIDCtxKey TraceIDKey = "trace_id" - SpanIDCtxKey SpanIDKey = "span_id" - ClusterIDCtxKey ClusterIDKey = "cluster_id" - ResourceTypeCtxKey ResourceTypeKey = "resource_type" - ResourceIDCtxKey ResourceIDKey = "resource_id" + OpIDKey contextKey = "operation_id" + AccountIDCtxKey contextKey = "account_id" + TransactionIDCtxKey contextKey = "transaction_id" + TraceIDCtxKey contextKey = "trace_id" + SpanIDCtxKey contextKey = "span_id" + ClusterIDCtxKey contextKey = "cluster_id" + ResourceTypeCtxKey contextKey = "resource_type" + ResourceIDCtxKey contextKey = "resource_id" +) + +// HTTP header names +const ( + OpIDHeader = "X-Operation-ID" ) // WithTraceID adds trace ID to context @@ -98,3 +100,25 @@ func GetResourceID(ctx context.Context) (string, bool) { resourceID, ok := ctx.Value(ResourceIDCtxKey).(string) return resourceID, ok } + +// WithAccountID adds account ID to context +func WithAccountID(ctx context.Context, accountID string) context.Context { + 
return context.WithValue(ctx, AccountIDCtxKey, accountID) +} + +// GetAccountID retrieves account ID from context +func GetAccountID(ctx context.Context) (string, bool) { + accountID, ok := ctx.Value(AccountIDCtxKey).(string) + return accountID, ok +} + +// WithTransactionID adds transaction ID to context +func WithTransactionID(ctx context.Context, transactionID int64) context.Context { + return context.WithValue(ctx, TransactionIDCtxKey, transactionID) +} + +// GetTransactionID retrieves transaction ID from context +func GetTransactionID(ctx context.Context) (int64, bool) { + transactionID, ok := ctx.Value(TransactionIDCtxKey).(int64) + return transactionID, ok +} diff --git a/pkg/logger/http.go b/pkg/logger/http.go new file mode 100644 index 0000000..15266f4 --- /dev/null +++ b/pkg/logger/http.go @@ -0,0 +1,49 @@ +package logger + +import ( + "log/slog" + "net/http" + "time" +) + +// HTTPMethod returns a slog attribute for HTTP method +func HTTPMethod(method string) slog.Attr { + return slog.String("method", method) +} + +// HTTPPath returns a slog attribute for HTTP path +func HTTPPath(path string) slog.Attr { + return slog.String("path", path) +} + +// HTTPStatusCode returns a slog attribute for HTTP status code +func HTTPStatusCode(code int) slog.Attr { + return slog.Int("status_code", code) +} + +// HTTPDuration returns a slog attribute for HTTP request duration in milliseconds +func HTTPDuration(d time.Duration) slog.Attr { + return slog.Int64("duration_ms", d.Milliseconds()) +} + +// HTTPUserAgent returns a slog attribute for HTTP user agent +func HTTPUserAgent(ua string) slog.Attr { + return slog.String("user_agent", ua) +} + +// HTTPRequestAttrs returns a slice of slog attributes for HTTP request +func HTTPRequestAttrs(r *http.Request) []slog.Attr { + return []slog.Attr{ + HTTPMethod(r.Method), + HTTPPath(r.URL.Path), + HTTPUserAgent(r.UserAgent()), + } +} + +// HTTPResponseAttrs returns a slice of slog attributes for HTTP response +func 
HTTPResponseAttrs(statusCode int, duration time.Duration) []slog.Attr { + return []slog.Attr{ + HTTPStatusCode(statusCode), + HTTPDuration(duration), + } +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go index 107b9a9..3abe2d3 100755 --- a/pkg/logger/logger.go +++ b/pkg/logger/logger.go @@ -8,9 +8,7 @@ import ( "os" "runtime" "strings" - - "github.com/golang/glog" - "github.com/openshift-hyperfleet/hyperfleet-api/pkg/util" + "sync/atomic" ) // LogFormat enumeration for output format @@ -42,7 +40,7 @@ type HyperFleetHandler struct { hostname string } -// NewHyperFleetHandler creates a custom slog handler with HyperFleet-specific fields +// NewHyperFleetHandler creates a HyperFleet logger handler func NewHyperFleetHandler(cfg *LogConfig) *HyperFleetHandler { var baseHandler slog.Handler opts := &slog.HandlerOptions{ @@ -53,8 +51,12 @@ func NewHyperFleetHandler(cfg *LogConfig) *HyperFleetHandler { return slog.Attr{Key: "timestamp", Value: a.Value} } if a.Key == slog.LevelKey { - level := a.Value.Any().(slog.Level) - return slog.Attr{Key: "level", Value: slog.StringValue(strings.ToLower(level.String()))} + // Safe type assertion to avoid panic + if level, ok := a.Value.Any().(slog.Level); ok { + return slog.Attr{Key: "level", Value: slog.StringValue(strings.ToLower(level.String()))} + } + // Fallback: convert to string if type assertion fails + return slog.Attr{Key: "level", Value: slog.StringValue(strings.ToLower(fmt.Sprint(a.Value.Any())))} } if a.Key == slog.MessageKey { return slog.Attr{Key: "message", Value: a.Value} @@ -88,29 +90,34 @@ func (h *HyperFleetHandler) Handle(ctx context.Context, r slog.Record) error { slog.String("hostname", h.hostname), ) - if traceID, ok := ctx.Value(TraceIDCtxKey).(string); ok && traceID != "" { + if traceID, ok := GetTraceID(ctx); ok && traceID != "" { r.AddAttrs(slog.String("trace_id", traceID)) } - if spanID, ok := ctx.Value(SpanIDCtxKey).(string); ok && spanID != "" { + if spanID, ok := GetSpanID(ctx); ok && spanID != 
"" { r.AddAttrs(slog.String("span_id", spanID)) } - if requestID, ok := ctx.Value(OpIDKey).(string); ok && requestID != "" { - r.AddAttrs(slog.String("request_id", requestID)) + if operationID := GetOperationID(ctx); operationID != "" { + r.AddAttrs(slog.String("operation_id", operationID)) + } + if accountID, ok := GetAccountID(ctx); ok && accountID != "" { + r.AddAttrs(slog.String("account_id", accountID)) } - if clusterID, ok := ctx.Value(ClusterIDCtxKey).(string); ok && clusterID != "" { + if transactionID, ok := GetTransactionID(ctx); ok && transactionID != 0 { + r.AddAttrs(slog.Int64("transaction_id", transactionID)) + } + if clusterID, ok := GetClusterID(ctx); ok && clusterID != "" { r.AddAttrs(slog.String("cluster_id", clusterID)) } - if resourceType, ok := ctx.Value(ResourceTypeCtxKey).(string); ok && resourceType != "" { + if resourceType, ok := GetResourceType(ctx); ok && resourceType != "" { r.AddAttrs(slog.String("resource_type", resourceType)) } - if resourceID, ok := ctx.Value(ResourceIDCtxKey).(string); ok && resourceID != "" { + if resourceID, ok := GetResourceID(ctx); ok && resourceID != "" { r.AddAttrs(slog.String("resource_id", resourceID)) } if r.Level >= slog.LevelError { stackTrace := captureStackTrace(4) r.AddAttrs(slog.Any("stack_trace", stackTrace)) - r.AddAttrs(slog.String("error", r.Message)) } return h.handler.Handle(ctx, r) @@ -162,21 +169,47 @@ func captureStackTrace(skip int) []string { return stackTrace } -var globalLogger *slog.Logger +var globalLogger atomic.Value // stores *slog.Logger // InitGlobalLogger initializes the global logger with the given configuration +// This function is thread-safe and idempotent (safe to call multiple times) func InitGlobalLogger(cfg *LogConfig) { + if v := globalLogger.Load(); v != nil { + if logger, ok := v.(*slog.Logger); ok && logger != nil { + return + } + } + handler := NewHyperFleetHandler(cfg) - globalLogger = slog.New(handler) - slog.SetDefault(globalLogger) + logger := slog.New(handler) + 
globalLogger.Store(logger) + slog.SetDefault(logger) } -// GetLogger returns a context-aware logger -func GetLogger(ctx context.Context) *slog.Logger { - if globalLogger == nil { - return slog.Default() +// ReconfigureGlobalLogger reconfigures the global logger with new configuration +// Unlike InitGlobalLogger, this can be called multiple times to update configuration +// This is useful when environment configuration is loaded after initial logger setup +func ReconfigureGlobalLogger(cfg *LogConfig) { + handler := NewHyperFleetHandler(cfg) + newLogger := slog.New(handler) + globalLogger.Store(newLogger) + slog.SetDefault(newLogger) +} + +// resetForTesting resets the global logger for testing purposes +// This function should ONLY be used in tests +func resetForTesting() { + globalLogger.Store((*slog.Logger)(nil)) +} + +// GetLogger returns the global logger instance +func GetLogger() *slog.Logger { + if v := globalLogger.Load(); v != nil { + if logger, ok := v.(*slog.Logger); ok && logger != nil { + return logger + } } - return globalLogger + return slog.Default() } // ParseLogLevel converts string to slog.Level @@ -220,151 +253,33 @@ func ParseLogOutput(output string) (io.Writer, error) { } func Debug(ctx context.Context, msg string, args ...any) { - GetLogger(ctx).DebugContext(ctx, msg, args...) + GetLogger().DebugContext(ctx, msg, args...) } func Info(ctx context.Context, msg string, args ...any) { - GetLogger(ctx).InfoContext(ctx, msg, args...) + GetLogger().InfoContext(ctx, msg, args...) } func Warn(ctx context.Context, msg string, args ...any) { - GetLogger(ctx).WarnContext(ctx, msg, args...) + GetLogger().WarnContext(ctx, msg, args...) } func Error(ctx context.Context, msg string, args ...any) { - GetLogger(ctx).ErrorContext(ctx, msg, args...) + GetLogger().ErrorContext(ctx, msg, args...) 
} func Debugf(ctx context.Context, format string, args ...interface{}) { - GetLogger(ctx).DebugContext(ctx, fmt.Sprintf(format, args...)) + GetLogger().DebugContext(ctx, fmt.Sprintf(format, args...)) } func Infof(ctx context.Context, format string, args ...interface{}) { - GetLogger(ctx).InfoContext(ctx, fmt.Sprintf(format, args...)) + GetLogger().InfoContext(ctx, fmt.Sprintf(format, args...)) } func Warnf(ctx context.Context, format string, args ...interface{}) { - GetLogger(ctx).WarnContext(ctx, fmt.Sprintf(format, args...)) + GetLogger().WarnContext(ctx, fmt.Sprintf(format, args...)) } func Errorf(ctx context.Context, format string, args ...interface{}) { - GetLogger(ctx).ErrorContext(ctx, fmt.Sprintf(format, args...)) -} - -// Legacy glog-based logger for backward compatibility -// DEPRECATED: Will be removed in PR 3 -type OCMLogger interface { - V(level int32) OCMLogger - Infof(format string, args ...interface{}) - Extra(key string, value interface{}) OCMLogger - Info(message string) - Warning(message string) - Error(message string) - Fatal(message string) -} - -var _ OCMLogger = &ocmLogger{} - -type extra map[string]interface{} - -type ocmLogger struct { - context context.Context - level int32 - accountID string - username string - extra extra -} - -// NewOCMLogger creates a legacy logger instance -// DEPRECATED: Use slog-based logger instead -func NewOCMLogger(ctx context.Context) OCMLogger { - logger := &ocmLogger{ - context: ctx, - level: 1, - extra: make(extra), - accountID: util.GetAccountIDFromContext(ctx), - } - return logger -} - -func (l *ocmLogger) prepareLogPrefix(message string, extra extra) string { - prefix := " " - - if txid, ok := l.context.Value("txid").(int64); ok { - prefix = fmt.Sprintf("[tx_id=%d]%s", txid, prefix) - } - - if l.accountID != "" { - prefix = fmt.Sprintf("[accountID=%s]%s", l.accountID, prefix) - } - - if opid, ok := l.context.Value(OpIDKey).(string); ok { - prefix = fmt.Sprintf("[opid=%s]%s", opid, prefix) - } - - var args 
[]string - for k, v := range extra { - args = append(args, fmt.Sprintf("%s=%v", k, v)) - } - - return fmt.Sprintf("%s %s %s", prefix, message, strings.Join(args, " ")) -} - -func (l *ocmLogger) prepareLogPrefixf(format string, args ...interface{}) string { - orig := fmt.Sprintf(format, args...) - prefix := " " - - if txid, ok := l.context.Value("txid").(int64); ok { - prefix = fmt.Sprintf("[tx_id=%d]%s", txid, prefix) - } - - if l.accountID != "" { - prefix = fmt.Sprintf("[accountID=%s]%s", l.accountID, prefix) - } - - if opid, ok := l.context.Value(OpIDKey).(string); ok { - prefix = fmt.Sprintf("[opid=%s]%s", opid, prefix) - } - - return fmt.Sprintf("%s%s", prefix, orig) -} - -func (l *ocmLogger) V(level int32) OCMLogger { - return &ocmLogger{ - context: l.context, - accountID: l.accountID, - username: l.username, - level: level, - } -} - -func (l *ocmLogger) Infof(format string, args ...interface{}) { - prefixed := l.prepareLogPrefixf(format, args...) - glog.V(glog.Level(l.level)).Infof("%s", prefixed) -} - -func (l *ocmLogger) Extra(key string, value interface{}) OCMLogger { - l.extra[key] = value - return l -} - -func (l *ocmLogger) Info(message string) { - l.log(message, glog.V(glog.Level(l.level)).Infoln) -} - -func (l *ocmLogger) Warning(message string) { - l.log(message, glog.Warningln) -} - -func (l *ocmLogger) Error(message string) { - l.log(message, glog.Errorln) -} - -func (l *ocmLogger) Fatal(message string) { - l.log(message, glog.Fatalln) -} - -func (l *ocmLogger) log(message string, glogFunc func(args ...interface{})) { - prefixed := l.prepareLogPrefix(message, l.extra) - glogFunc(prefixed) + GetLogger().ErrorContext(ctx, fmt.Sprintf(format, args...)) } diff --git a/pkg/logger/logger_test.go b/pkg/logger/logger_test.go index 3b4d741..7dd2611 100644 --- a/pkg/logger/logger_test.go +++ b/pkg/logger/logger_test.go @@ -122,73 +122,76 @@ func TestParseLogOutput(t *testing.T) { } } -// TestJSONFormat_BasicLog tests basic JSON format output -func 
TestJSONFormat_BasicLog(t *testing.T) { - var buf bytes.Buffer - cfg := &LogConfig{ - Level: slog.LevelInfo, - Format: FormatJSON, - Output: &buf, - Component: "api", - Version: "test-version", - Hostname: "test-host", +// TestBasicLogFormat tests basic log format output for both JSON and text formats +func TestBasicLogFormat(t *testing.T) { + tests := []struct { + name string + format LogFormat + }{ + {"JSON format", FormatJSON}, + {"Text format", FormatText}, } - InitGlobalLogger(cfg) - ctx := context.Background() - Info(ctx, "test message", "key", "value") - - var logEntry map[string]interface{} - if err := json.Unmarshal(buf.Bytes(), &logEntry); err != nil { - t.Fatalf("failed to parse JSON log: %v", err) - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resetForTesting() + var buf bytes.Buffer + cfg := &LogConfig{ + Level: slog.LevelInfo, + Format: tt.format, + Output: &buf, + Component: "api", + Version: "test-version", + Hostname: "test-host", + } - // Check required fields - if logEntry["message"] != "test message" { - t.Errorf("expected message 'test message', got %v", logEntry["message"]) - } - if logEntry["level"] != "info" { - t.Errorf("expected level 'info', got %v", logEntry["level"]) - } - if logEntry["component"] != "api" { - t.Errorf("expected component 'api', got %v", logEntry["component"]) - } - if logEntry["version"] != "test-version" { - t.Errorf("expected version 'test-version', got %v", logEntry["version"]) - } - if logEntry["hostname"] != "test-host" { - t.Errorf("expected hostname 'test-host', got %v", logEntry["hostname"]) - } - if logEntry["key"] != "value" { - t.Errorf("expected key 'value', got %v", logEntry["key"]) - } -} + InitGlobalLogger(cfg) + ctx := context.Background() + Info(ctx, "test message", "key", "value") -// TestTextFormat_BasicLog tests basic text format output -func TestTextFormat_BasicLog(t *testing.T) { - var buf bytes.Buffer - cfg := &LogConfig{ - Level: slog.LevelInfo, - Format: FormatText, - 
Output: &buf, - Component: "api", - Version: "test-version", - Hostname: "test-host", - } + output := buf.String() + if output == "" { + t.Fatal("expected log output, got none") + } - InitGlobalLogger(cfg) - ctx := context.Background() - Info(ctx, "test message", "key", "value") + if tt.format == FormatJSON { + var logEntry map[string]interface{} + if err := json.Unmarshal(buf.Bytes(), &logEntry); err != nil { + t.Fatalf("failed to parse JSON log: %v", err) + } - output := buf.String() - if !strings.Contains(output, "test message") { - t.Errorf("expected output to contain 'test message', got: %s", output) - } - if !strings.Contains(output, "level=info") { - t.Errorf("expected output to contain 'level=info', got: %s", output) - } - if !strings.Contains(output, "component=api") { - t.Errorf("expected output to contain 'component=api', got: %s", output) + // Check required fields + if logEntry["message"] != "test message" { + t.Errorf("expected message 'test message', got %v", logEntry["message"]) + } + if logEntry["level"] != "info" { + t.Errorf("expected level 'info', got %v", logEntry["level"]) + } + if logEntry["component"] != "api" { + t.Errorf("expected component 'api', got %v", logEntry["component"]) + } + if logEntry["version"] != "test-version" { + t.Errorf("expected version 'test-version', got %v", logEntry["version"]) + } + if logEntry["hostname"] != "test-host" { + t.Errorf("expected hostname 'test-host', got %v", logEntry["hostname"]) + } + if logEntry["key"] != "value" { + t.Errorf("expected key 'value', got %v", logEntry["key"]) + } + } else { + // Text format - check for key content + if !strings.Contains(output, "test message") { + t.Errorf("expected output to contain 'test message', got: %s", output) + } + if !strings.Contains(output, "level=info") { + t.Errorf("expected output to contain 'level=info', got: %s", output) + } + if !strings.Contains(output, "component=api") { + t.Errorf("expected output to contain 'component=api', got: %s", output) + } 
+ } + }) } } @@ -213,11 +216,11 @@ func TestContextFields(t *testing.T) { fieldValue: "span-456", }, { - name: "request_id", + name: "operation_id", setupCtx: func(ctx context.Context) context.Context { return context.WithValue(ctx, OpIDKey, "op-789") }, - fieldName: "request_id", + fieldName: "operation_id", fieldValue: "op-789", }, { @@ -242,6 +245,7 @@ func TestContextFields(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + resetForTesting() var buf bytes.Buffer cfg := &LogConfig{ Level: slog.LevelInfo, @@ -270,6 +274,7 @@ func TestContextFields(t *testing.T) { // TestContextFields_Multiple tests multiple context fields together func TestContextFields_Multiple(t *testing.T) { + resetForTesting() var buf bytes.Buffer cfg := &LogConfig{ Level: slog.LevelInfo, @@ -310,14 +315,14 @@ func TestStackTrace(t *testing.T) { level slog.Level logFunc func(context.Context, string, ...any) expectStackTrace bool - expectErrorField bool }{ - {"error level has stack trace", slog.LevelError, Error, true, true}, - {"info level no stack trace", slog.LevelInfo, Info, false, false}, + {"error level has stack trace", slog.LevelError, Error, true}, + {"info level no stack trace", slog.LevelInfo, Info, false}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + resetForTesting() var buf bytes.Buffer cfg := &LogConfig{ Level: tt.level, @@ -354,16 +359,6 @@ func TestStackTrace(t *testing.T) { t.Error("expected no stack_trace field for non-error level") } } - - if tt.expectErrorField { - if logEntry["error"] != "test message" { - t.Errorf("expected error field 'test message', got %v", logEntry["error"]) - } - } else { - if logEntry["error"] != nil { - t.Error("expected no error field for non-error level") - } - } }) } } @@ -382,6 +377,7 @@ func TestLogLevelFiltering(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + resetForTesting() var buf bytes.Buffer cfg := &LogConfig{ Level: tt.configLevel, @@ -409,12 +405,11 
@@ func TestLogLevelFiltering(t *testing.T) { // TestGetLogger_Uninitialized tests GetLogger with uninitialized global logger func TestGetLogger_Uninitialized(t *testing.T) { // Save and restore global logger - saved := globalLogger - defer func() { globalLogger = saved }() + saved := globalLogger.Load() + defer func() { globalLogger.Store(saved) }() - globalLogger = nil - ctx := context.Background() - logger := GetLogger(ctx) + globalLogger.Store((*slog.Logger)(nil)) + logger := GetLogger() if logger == nil { t.Error("expected GetLogger to return non-nil logger") @@ -426,7 +421,7 @@ func TestGetLogger_Uninitialized(t *testing.T) { } } -// TestConvenienceFunctions tests convenience functions (Debug, Info, Warn, Error) +// TestConvenienceFunctions tests all convenience functions (Debug, Info, Warn, Error) func TestConvenienceFunctions(t *testing.T) { tests := []struct { name string @@ -435,11 +430,14 @@ func TestConvenienceFunctions(t *testing.T) { expectedLvl string }{ {"Debug", slog.LevelDebug, Debug, "debug"}, + {"Info", slog.LevelInfo, Info, "info"}, {"Warn", slog.LevelWarn, Warn, "warn"}, + {"Error", slog.LevelError, Error, "error"}, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + resetForTesting() var buf bytes.Buffer cfg := &LogConfig{ Level: tt.level, @@ -480,7 +478,6 @@ func TestFormattedFunctions(t *testing.T) { expectedMsg string expectedLvl string expectStackTrace bool - expectErrorField bool }{ { name: "Debugf", @@ -491,7 +488,6 @@ func TestFormattedFunctions(t *testing.T) { expectedMsg: "debug message with string and 123", expectedLvl: "debug", expectStackTrace: false, - expectErrorField: false, }, { name: "Infof", @@ -502,7 +498,6 @@ func TestFormattedFunctions(t *testing.T) { expectedMsg: "info message: cluster=test-cluster, count=42", expectedLvl: "info", expectStackTrace: false, - expectErrorField: false, }, { name: "Warnf", @@ -513,7 +508,6 @@ func TestFormattedFunctions(t *testing.T) { expectedMsg: "warning: operation failed 
with code 500", expectedLvl: "warn", expectStackTrace: false, - expectErrorField: false, }, { name: "Errorf", @@ -524,12 +518,12 @@ func TestFormattedFunctions(t *testing.T) { expectedMsg: "error: failed to process resource-123: connection timeout", expectedLvl: "error", expectStackTrace: true, - expectErrorField: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + resetForTesting() var buf bytes.Buffer cfg := &LogConfig{ Level: tt.level, @@ -556,12 +550,6 @@ func TestFormattedFunctions(t *testing.T) { t.Errorf("expected message '%s', got %v", tt.expectedMsg, logEntry["message"]) } - if tt.expectErrorField { - if logEntry["error"] != tt.expectedMsg { - t.Errorf("expected error field '%s', got %v", tt.expectedMsg, logEntry["error"]) - } - } - if tt.expectStackTrace { if logEntry["stack_trace"] == nil { t.Error("expected stack_trace field for error level") diff --git a/pkg/logger/ocm_bridge.go b/pkg/logger/ocm_bridge.go new file mode 100644 index 0000000..96d2c41 --- /dev/null +++ b/pkg/logger/ocm_bridge.go @@ -0,0 +1,64 @@ +package logger + +import ( + "context" + "fmt" + "log/slog" + "os" + + sdk "github.com/openshift-online/ocm-sdk-go" +) + +// OCMLoggerBridge bridges slog to OCM SDK's Logger interface +type OCMLoggerBridge struct{} + +// NewOCMLoggerBridge creates an OCM SDK logger bridge +func NewOCMLoggerBridge() sdk.Logger { + return &OCMLoggerBridge{} +} + +// Debug implements sdk.Logger +func (b *OCMLoggerBridge) Debug(ctx context.Context, format string, args ...interface{}) { + GetLogger().DebugContext(ctx, fmt.Sprintf(format, args...)) +} + +// Info implements sdk.Logger +func (b *OCMLoggerBridge) Info(ctx context.Context, format string, args ...interface{}) { + GetLogger().InfoContext(ctx, fmt.Sprintf(format, args...)) +} + +// Warn implements sdk.Logger +func (b *OCMLoggerBridge) Warn(ctx context.Context, format string, args ...interface{}) { + GetLogger().WarnContext(ctx, fmt.Sprintf(format, args...)) +} + +// Error implements 
sdk.Logger +func (b *OCMLoggerBridge) Error(ctx context.Context, format string, args ...interface{}) { + GetLogger().ErrorContext(ctx, fmt.Sprintf(format, args...)) +} + +// Fatal implements sdk.Logger +func (b *OCMLoggerBridge) Fatal(ctx context.Context, format string, args ...interface{}) { + GetLogger().ErrorContext(ctx, "FATAL: "+fmt.Sprintf(format, args...)) + os.Exit(1) +} + +// DebugEnabled implements sdk.Logger +func (b *OCMLoggerBridge) DebugEnabled() bool { + return GetLogger().Enabled(context.Background(), slog.LevelDebug) +} + +// InfoEnabled implements sdk.Logger +func (b *OCMLoggerBridge) InfoEnabled() bool { + return GetLogger().Enabled(context.Background(), slog.LevelInfo) +} + +// WarnEnabled implements sdk.Logger +func (b *OCMLoggerBridge) WarnEnabled() bool { + return GetLogger().Enabled(context.Background(), slog.LevelWarn) +} + +// ErrorEnabled implements sdk.Logger +func (b *OCMLoggerBridge) ErrorEnabled() bool { + return GetLogger().Enabled(context.Background(), slog.LevelError) +} diff --git a/pkg/logger/operationid_middleware.go b/pkg/logger/operationid_middleware.go index ad8b527..39024b4 100755 --- a/pkg/logger/operationid_middleware.go +++ b/pkg/logger/operationid_middleware.go @@ -11,7 +11,7 @@ func OperationIDMiddleware(handler http.Handler) http.Handler { opID, ok := ctx.Value(OpIDKey).(string) if ok && len(opID) > 0 { - w.Header().Set(string(OpIDHeader), opID) + w.Header().Set(OpIDHeader, opID) } handler.ServeHTTP(w, r.WithContext(ctx)) diff --git a/pkg/middleware/masking.go b/pkg/middleware/masking.go index a7addf6..5a32e1a 100644 --- a/pkg/middleware/masking.go +++ b/pkg/middleware/masking.go @@ -1,11 +1,14 @@ package middleware import ( + "context" "encoding/json" "net/http" + "regexp" "strings" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" ) // MaskingMiddleware handles sensitive data masking for logging @@ -16,6 +19,16 @@ type MaskingMiddleware struct { 
sensitiveFields []string } +var ( + // Regex patterns for text-based fallback masking + emailPattern = regexp.MustCompile(`[a-zA-Z0-9._%+\-]+@[a-zA-Z0-9.\-]+\.[a-zA-Z]{2,}`) + creditCardPattern = regexp.MustCompile(`\b\d{4}[\s\-]?\d{4}[\s\-]?\d{4}[\s\-]?\d{4}\b`) + // Matches common API key/token formats: Bearer tokens, API keys, etc. + apiKeyPattern = regexp.MustCompile(`(?i)(bearer\s+|api[_\-]?key[\s:=]+|token[\s:=]+|secret[\s:=]+|password[\s:=]+)[a-zA-Z0-9\-_\.]+`) + // Matches form-encoded sensitive key-values + formEncodedPattern = regexp.MustCompile(`(?i)(password|token|secret|api[_\-]?key)=[^&\s]+`) +) + // NewMaskingMiddleware creates a new masking middleware from logging config func NewMaskingMiddleware(cfg *config.LoggingConfig) *MaskingMiddleware { return &MaskingMiddleware{ @@ -25,6 +38,40 @@ func NewMaskingMiddleware(cfg *config.LoggingConfig) *MaskingMiddleware { } } +// maskTextFallback applies text-based regex heuristics to redact sensitive data +// Used as fallback when JSON parsing fails +func (m *MaskingMiddleware) maskTextFallback(body []byte) []byte { + text := string(body) + + // Redact email addresses + text = emailPattern.ReplaceAllString(text, "***REDACTED_EMAIL***") + + // Redact credit card numbers + text = creditCardPattern.ReplaceAllString(text, "***REDACTED_CC***") + + // Redact API keys and tokens (keep the prefix for context) + text = apiKeyPattern.ReplaceAllStringFunc(text, func(match string) string { + // Extract and preserve the prefix (e.g., "bearer ", "api_key=") + parts := apiKeyPattern.FindStringSubmatch(match) + if len(parts) > 1 { + return parts[1] + "***REDACTED***" + } + return "***REDACTED***" + }) + + // Redact form-encoded sensitive values + text = formEncodedPattern.ReplaceAllStringFunc(text, func(match string) string { + // Keep the key name but redact the value + idx := strings.Index(match, "=") + if idx > 0 { + return match[:idx+1] + "***REDACTED***" + } + return "***REDACTED***" + }) + + return []byte(text) +} + // 
MaskHeaders masks sensitive HTTP headers // Returns a new header map with sensitive values replaced by "***REDACTED***" func (m *MaskingMiddleware) MaskHeaders(headers http.Header) http.Header { @@ -45,28 +92,51 @@ func (m *MaskingMiddleware) MaskHeaders(headers http.Header) http.Header { // MaskBody masks sensitive fields in JSON body // Returns a new JSON byte array with sensitive fields replaced by "***REDACTED***" -// If body is not valid JSON, returns original body unchanged +// Handles both top-level objects and top-level arrays +// If masking is disabled, returns original body unchanged +// If masking is enabled but JSON parsing fails, applies text-based fallback masking func (m *MaskingMiddleware) MaskBody(body []byte) []byte { - if !m.enabled || len(body) == 0 { + if len(body) == 0 { return body } - var data map[string]interface{} - if err := json.Unmarshal(body, &data); err != nil { - // Not valid JSON, return unchanged + // If masking is disabled, return original body + if !m.enabled { return body } - m.maskMapRecursive(data) + var data interface{} + if err := json.Unmarshal(body, &data); err != nil { + // JSON parsing failed - use text-based fallback masking to prevent leakage + logger.Warn(context.Background(), "JSON parsing failed in MaskBody, applying text-based fallback masking", + "error", err.Error()) + return m.maskTextFallback(body) + } + + m.maskRecursive(data) masked, err := json.Marshal(data) if err != nil { - // Failed to marshal, return original - return body + // JSON marshaling failed - use text-based fallback masking to prevent leakage + logger.Warn(context.Background(), "JSON marshaling failed in MaskBody, applying text-based fallback masking", + "error", err.Error()) + return m.maskTextFallback(body) } return masked } +// maskRecursive recursively masks sensitive data in any JSON structure +func (m *MaskingMiddleware) maskRecursive(data interface{}) { + switch v := data.(type) { + case map[string]interface{}: + m.maskMapRecursive(v) + 
case []interface{}: + for _, item := range v { + m.maskRecursive(item) + } + } +} + // maskMapRecursive recursively masks sensitive fields in nested maps func (m *MaskingMiddleware) maskMapRecursive(data map[string]interface{}) { for key, value := range data { diff --git a/pkg/middleware/masking_test.go b/pkg/middleware/masking_test.go index 389752c..55d942d 100644 --- a/pkg/middleware/masking_test.go +++ b/pkg/middleware/masking_test.go @@ -114,6 +114,18 @@ func TestMaskBody(t *testing.T) { body: `{"users":[{"name":"alice","password":"pass1"},{"name":"bob","secret":"pass2"}]}`, expected: `{"users":[{"name":"alice","password":"***REDACTED***"},{"name":"bob","secret":"***REDACTED***"}]}`, }, + { + name: "mask top-level array with sensitive fields", + enabled: true, + body: `[{"name":"alice","password":"secret1"},{"name":"bob","token":"secret2"}]`, + expected: `[{"name":"alice","password":"***REDACTED***"},{"name":"bob","token":"***REDACTED***"}]`, + }, + { + name: "mask nested arrays with sensitive fields", + enabled: true, + body: `[[{"password":"secret1"}],[{"api_key":"secret2"}]]`, + expected: `[[{"password":"***REDACTED***"}],[{"api_key":"***REDACTED***"}]]`, + }, { name: "mask multiple sensitive fields", enabled: true, @@ -127,7 +139,7 @@ func TestMaskBody(t *testing.T) { expected: `{"AccessToken":"***REDACTED***","Password":"***REDACTED***","SECRET":"***REDACTED***"}`, }, { - name: "non-JSON body unchanged", + name: "non-JSON body without sensitive data unchanged", enabled: true, body: `not json content`, expected: `not json content`, @@ -144,6 +156,49 @@ func TestMaskBody(t *testing.T) { body: `{"password":"secret"}`, expected: `{"password":"secret"}`, }, + // Fallback masking tests (non-JSON content with sensitive data) + { + name: "fallback: redact email addresses", + enabled: true, + body: `User email: alice@example.com, contact bob.smith@company.co.uk`, + expected: `User email: ***REDACTED_EMAIL***, contact ***REDACTED_EMAIL***`, + }, + { + name: 
"fallback: redact credit card numbers", + enabled: true, + body: `Card: 4532-1234-5678-9010 and 5425233430109903`, + expected: `Card: ***REDACTED_CC*** and ***REDACTED_CC***`, + }, + { + name: "fallback: redact Bearer tokens", + enabled: true, + body: `Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9`, + expected: `Authorization: Bearer ***REDACTED***`, + }, + { + name: "fallback: redact API keys", + enabled: true, + body: `API_KEY=sk_test_123456789abcdef and api-key: prod_key_xyz`, + expected: `API_KEY=***REDACTED*** and api-key: ***REDACTED***`, + }, + { + name: "fallback: redact form-encoded passwords", + enabled: true, + body: `username=alice&password=secret123&email=test@example.com`, + expected: `username=alice&password=***REDACTED***&email=***REDACTED_EMAIL***`, + }, + { + name: "fallback: redact multiple sensitive patterns", + enabled: true, + body: `User: alice@example.com, Token: secret_abc123, CC: 4532123456789010`, + expected: `User: ***REDACTED_EMAIL***, Token: ***REDACTED***, CC: ***REDACTED_CC***`, + }, + { + name: "fallback: disabled masking returns original", + enabled: false, + body: `password=secret123&api_key=test_key&user@example.com`, + expected: `password=secret123&api_key=test_key&user@example.com`, + }, } for _, tt := range tests { @@ -157,7 +212,7 @@ func TestMaskBody(t *testing.T) { m := NewMaskingMiddleware(cfg) result := m.MaskBody([]byte(tt.body)) - // For JSON, compare as maps to handle key ordering + // For JSON objects, compare as maps to handle key ordering if tt.body != "" && tt.body[0] == '{' { var resultMap, expectedMap map[string]interface{} if err := json.Unmarshal(result, &resultMap); err != nil { @@ -171,6 +226,20 @@ func TestMaskBody(t *testing.T) { if !deepEqual(resultMap, expectedMap) { t.Errorf("MaskBody() = %s, want %s", result, tt.expected) } + } else if tt.body != "" && tt.body[0] == '[' { + // For JSON arrays, compare as slices + var resultArray, expectedArray []interface{} + if err := 
json.Unmarshal(result, &resultArray); err != nil { + t.Fatalf("failed to unmarshal result array: %v", err) + } + if err := json.Unmarshal([]byte(tt.expected), &expectedArray); err != nil { + t.Fatalf("failed to unmarshal expected array: %v", err) + } + + // Deep comparison + if !deepEqualSlice(resultArray, expectedArray) { + t.Errorf("MaskBody() = %s, want %s", result, tt.expected) + } } else { // For non-JSON, compare as strings if string(result) != tt.expected { @@ -297,8 +366,17 @@ func deepEqualSlice(a, b []interface{}) bool { if !deepEqual(aMap, bMap) { return false } - } else if a[i] != b[i] { - return false + } else { + aSlice, aIsSlice := a[i].([]interface{}) + bSlice, bIsSlice := b[i].([]interface{}) + + if aIsSlice && bIsSlice { + if !deepEqualSlice(aSlice, bSlice) { + return false + } + } else if a[i] != b[i] { + return false + } } } diff --git a/pkg/middleware/otel.go b/pkg/middleware/otel.go index 93c32f9..809de78 100644 --- a/pkg/middleware/otel.go +++ b/pkg/middleware/otel.go @@ -10,36 +10,28 @@ import ( ) // OTelMiddleware wraps HTTP handlers with OpenTelemetry instrumentation -// Automatically: -// - Creates spans for HTTP requests -// - Extracts trace context from traceparent header (W3C Trace Context) -// - Injects trace context into outbound requests -// - Adds trace_id and span_id to logger context +// Extracts trace_id and span_id from OTel span and adds them to logger context func OTelMiddleware(handler http.Handler) http.Handler { - // Use otelhttp to automatically create spans - otelHandler := otelhttp.NewHandler(handler, "hyperfleet-api", - otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string { - // Custom span name: "METHOD /path" - return r.Method + " " + r.URL.Path - }), - ) - - // Extract trace_id and span_id and add to context - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + enrichedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() span := 
trace.SpanFromContext(ctx) - // If span context is valid, extract trace_id and span_id if span.SpanContext().IsValid() { traceID := span.SpanContext().TraceID().String() spanID := span.SpanContext().SpanID().String() - - // Add to logger context ctx = logger.WithTraceID(ctx, traceID) ctx = logger.WithSpanID(ctx, spanID) } - // Serve with updated context - otelHandler.ServeHTTP(w, r.WithContext(ctx)) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) }) + + otelHandler := otelhttp.NewHandler(enrichedHandler, "hyperfleet-api", + otelhttp.WithSpanNameFormatter(func(operation string, r *http.Request) string { + return r.Method + " " + r.URL.Path + }), + ) + + return otelHandler } diff --git a/pkg/middleware/schema_validation.go b/pkg/middleware/schema_validation.go index c15a883..4bd3c3e 100644 --- a/pkg/middleware/schema_validation.go +++ b/pkg/middleware/schema_validation.go @@ -14,18 +14,20 @@ import ( // handleValidationError writes validation error response func handleValidationError(w http.ResponseWriter, r *http.Request, err *errors.ServiceError) { - log := logger.NewOCMLogger(r.Context()) operationID := logger.GetOperationID(r.Context()) // Log validation errors as info (user error, not server error) - log.Infof(err.Error()) + logger.Info(r.Context(), err.Error()) // Write JSON error response w.Header().Set("Content-Type", "application/json") w.WriteHeader(err.HttpCode) if encodeErr := json.NewEncoder(w).Encode(err.AsOpenapiError(operationID)); encodeErr != nil { - log.Extra("endpoint", r.URL.Path).Extra("method", r.Method).Extra("status_code", err.HttpCode). 
- Extra("error", encodeErr.Error()).Error("Failed to encode validation error response") + logger.Error(r.Context(), "Failed to encode validation error response", + "endpoint", r.URL.Path, + "method", r.Method, + "status_code", err.HttpCode, + "error", encodeErr.Error()) } } @@ -48,8 +50,8 @@ func SchemaValidationMiddleware(validator *validators.SchemaValidator) func(http return } if closeErr := r.Body.Close(); closeErr != nil { - log := logger.NewOCMLogger(r.Context()) - log.Extra("error", closeErr.Error()).Warning("Failed to close request body") + logger.Warn(r.Context(), "Failed to close request body", + "error", closeErr.Error()) } // Restore the request body for the next handler diff --git a/pkg/services/cluster.go b/pkg/services/cluster.go index 0d492eb..6e400c0 100644 --- a/pkg/services/cluster.go +++ b/pkg/services/cluster.go @@ -99,21 +99,18 @@ func (s *sqlClusterService) All(ctx context.Context) (api.ClusterList, *errors.S } func (s *sqlClusterService) OnUpsert(ctx context.Context, id string) error { - logger := logger.NewOCMLogger(ctx) - cluster, err := s.clusterDao.Get(ctx, id) if err != nil { return err } - logger.Infof("Do idempotent somethings with this cluster: %s", cluster.ID) + logger.Info(ctx, "Perform idempotent operations on cluster", "cluster_id", cluster.ID) return nil } func (s *sqlClusterService) OnDelete(ctx context.Context, id string) error { - logger := logger.NewOCMLogger(ctx) - logger.Infof("This cluster has been deleted: %s", id) + logger.Info(ctx, "Cluster has been deleted", "cluster_id", id) return nil } diff --git a/pkg/services/generic.go b/pkg/services/generic.go index 1192bd3..f20a998 100755 --- a/pkg/services/generic.go +++ b/pkg/services/generic.go @@ -55,7 +55,6 @@ type listContext struct { args *ListArguments username string pagingMeta *api.PagingMeta - ulog *logger.OCMLogger resourceList interface{} disallowedFields *map[string]string resourceType string @@ -65,7 +64,6 @@ type listContext struct { } func (s 
*sqlGenericService) newListContext(ctx context.Context, username string, args *ListArguments, resourceList interface{}) (*listContext, interface{}, *errors.ServiceError) { - log := logger.NewOCMLogger(ctx) resourceModel := reflect.TypeOf(resourceList).Elem().Elem() resourceTypeStr := resourceModel.Name() if resourceTypeStr == "" { @@ -81,7 +79,6 @@ func (s *sqlGenericService) newListContext(ctx context.Context, username string, args: args, username: username, pagingMeta: &api.PagingMeta{Page: args.Page}, - ulog: &log, resourceList: resourceList, disallowedFields: &disallowedFields, resourceType: resourceTypeStr, @@ -236,7 +233,6 @@ func (s *sqlGenericService) addJoins(listCtx *listContext, d *dao.GenericDao) { func (s *sqlGenericService) loadList(listCtx *listContext, d *dao.GenericDao) *errors.ServiceError { args := listCtx.args - ulog := *listCtx.ulog (*d).Count(listCtx.resourceList, &listCtx.pagingMeta.Total) @@ -247,13 +243,13 @@ func (s *sqlGenericService) loadList(listCtx *listContext, d *dao.GenericDao) *e switch { case args.Size > MaxListSize: - ulog.Warning("A query with a size greater than the maximum was requested.") + logger.Warn(listCtx.ctx, "A query with a size greater than the maximum was requested.") case args.Size < 0: - ulog.Warning("A query with an unbound size was requested.") + logger.Warn(listCtx.ctx, "A query with an unbound size was requested.") case args.Size == 0: // This early return is not only performant, but also necessary. // gorm does not support Limit(0) any longer. 
- ulog.Infof("A query with 0 size requested, returning early without collecting any resources from database") + logger.Info(listCtx.ctx, "A query with 0 size requested, returning early without collecting any resources from database") return nil } diff --git a/pkg/services/node_pool.go b/pkg/services/node_pool.go index 0b949c4..67515e1 100644 --- a/pkg/services/node_pool.go +++ b/pkg/services/node_pool.go @@ -99,21 +99,18 @@ func (s *sqlNodePoolService) All(ctx context.Context) (api.NodePoolList, *errors } func (s *sqlNodePoolService) OnUpsert(ctx context.Context, id string) error { - logger := logger.NewOCMLogger(ctx) - nodePool, err := s.nodePoolDao.Get(ctx, id) if err != nil { return err } - logger.Infof("Do idempotent somethings with this nodePool: %s", nodePool.ID) + logger.Info(ctx, "Perform idempotent operations on node pool", "nodepool_id", nodePool.ID) return nil } func (s *sqlNodePoolService) OnDelete(ctx context.Context, id string) error { - logger := logger.NewOCMLogger(ctx) - logger.Infof("This nodePool has been deleted: %s", id) + logger.Info(ctx, "Node pool has been deleted", "nodepool_id", id) return nil } diff --git a/pkg/services/status_aggregation.go b/pkg/services/status_aggregation.go index f6ac507..9a1e3e5 100644 --- a/pkg/services/status_aggregation.go +++ b/pkg/services/status_aggregation.go @@ -3,7 +3,6 @@ package services import ( "context" "encoding/json" - "fmt" "strings" "unicode" @@ -97,7 +96,6 @@ func MapAdapterToConditionType(adapterName string) string { // Note: Post-MVP will add more phases (Pending, Provisioning, Failed, Degraded) // based on Health condition and Applied condition states. 
func ComputePhase(ctx context.Context, adapterStatuses api.AdapterStatusList, requiredAdapters []string, resourceGeneration int32) string { - log := logger.NewOCMLogger(ctx) if len(adapterStatuses) == 0 { return "NotReady" } @@ -141,15 +139,17 @@ func ComputePhase(ctx context.Context, adapterStatuses api.AdapterStatusList, re if !exists { // Required adapter not found - log.Warning(fmt.Sprintf("Required adapter '%s' not found in adapter statuses", adapterName)) + logger.Warn(ctx, "Required adapter not found in adapter statuses", "adapter", adapterName) continue } // Check generation matching only if resourceGeneration > 0 if resourceGeneration > 0 && adapterInfo.observedGeneration != resourceGeneration { // Adapter is processing old generation (stale) - log.Warning(fmt.Sprintf("Required adapter '%s' has stale generation: observed=%d, expected=%d", - adapterName, adapterInfo.observedGeneration, resourceGeneration)) + logger.Warn(ctx, "Required adapter has stale generation", + "adapter", adapterName, + "observed_generation", adapterInfo.observedGeneration, + "expected_generation", resourceGeneration) continue } diff --git a/pkg/telemetry/otel.go b/pkg/telemetry/otel.go index 1771fbb..6778c15 100644 --- a/pkg/telemetry/otel.go +++ b/pkg/telemetry/otel.go @@ -7,7 +7,7 @@ import ( "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + semconv "go.opentelemetry.io/otel/semconv/v1.37.0" ) // InitTraceProvider initializes OpenTelemetry trace provider diff --git a/test/helper.go b/test/helper.go index f79b9fa..3089589 100755 --- a/test/helper.go +++ b/test/helper.go @@ -6,6 +6,7 @@ import ( "encoding/base64" "encoding/json" "fmt" + "log/slog" "os" "strings" "sync" @@ -14,7 +15,6 @@ import ( "github.com/bxcodec/faker/v3" "github.com/golang-jwt/jwt/v4" - "github.com/golang/glog" "github.com/google/uuid" "github.com/segmentio/ksuid" 
"github.com/spf13/pflag" @@ -26,6 +26,7 @@ import ( "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" "github.com/openshift-hyperfleet/hyperfleet-api/test/factories" "github.com/openshift-hyperfleet/hyperfleet-api/test/mocks" ) @@ -68,6 +69,10 @@ type Helper struct { func NewHelper(t *testing.T) *Helper { once.Do(func() { + // Initialize logger first + initTestLogger() + ctx := context.Background() + jwtKey, jwtCA, err := parseJWTKeys() if err != nil { fmt.Println("Unable to read JWT keys - this may affect tests that make authenticated server requests") @@ -76,10 +81,11 @@ func NewHelper(t *testing.T) *Helper { env := environments.Environment() err = env.AddFlags(pflag.CommandLine) if err != nil { - glog.Fatalf("Unable to add environment flags: %s", err.Error()) + logger.Error(ctx, "Unable to add environment flags", "error", err) + os.Exit(1) } if logLevel := os.Getenv("LOGLEVEL"); logLevel != "" { - glog.Infof("Using custom loglevel: %s", logLevel) + logger.Info(ctx, "Using custom loglevel", "level", logLevel) // Intentionally ignore error from Set — acceptable for tests _ = pflag.CommandLine.Set("-v", logLevel) } @@ -87,7 +93,8 @@ func NewHelper(t *testing.T) *Helper { err = env.Initialize() if err != nil { - glog.Fatalf("Unable to initialize testing environment: %s", err.Error()) + logger.Error(ctx, "Unable to initialize testing environment", "error", err) + os.Exit(1) } helper = &Helper{ @@ -132,17 +139,19 @@ func (helper *Helper) Teardown() { } func (helper *Helper) startAPIServer() { + ctx := context.Background() // Configure JWK certificate URL for API server helper.Env().Config.Server.JwkCertURL = jwkURL helper.APIServer = server.NewAPIServer() listener, err := helper.APIServer.Listen() if err != nil { - glog.Fatalf("Unable to start Test API server: %s", err) + 
logger.Error(ctx, "Unable to start Test API server", "error", err) + os.Exit(1) } go func() { - glog.V(10).Info("Test API server started") + logger.Debug(ctx, "Test API server started") helper.APIServer.Serve(listener) - glog.V(10).Info("Test API server stopped") + logger.Debug(ctx, "Test API server stopped") }() } @@ -154,45 +163,52 @@ func (helper *Helper) stopAPIServer() error { } func (helper *Helper) startMetricsServer() { + ctx := context.Background() helper.MetricsServer = server.NewMetricsServer() go func() { - glog.V(10).Info("Test Metrics server started") + logger.Debug(ctx, "Test Metrics server started") helper.MetricsServer.Start() - glog.V(10).Info("Test Metrics server stopped") + logger.Debug(ctx, "Test Metrics server stopped") }() } func (helper *Helper) stopMetricsServer() { + ctx := context.Background() if err := helper.MetricsServer.Stop(); err != nil { - glog.Fatalf("Unable to stop metrics server: %s", err.Error()) + logger.Error(ctx, "Unable to stop metrics server", "error", err) + os.Exit(1) } } func (helper *Helper) startHealthCheckServer() { + ctx := context.Background() helper.HealthCheckServer = server.NewHealthCheckServer() go func() { - glog.V(10).Info("Test health check server started") + logger.Debug(ctx, "Test health check server started") helper.HealthCheckServer.Start() - glog.V(10).Info("Test health check server stopped") + logger.Debug(ctx, "Test health check server stopped") }() } func (helper *Helper) RestartServer() { + ctx := context.Background() if err := helper.stopAPIServer(); err != nil { - glog.Warningf("unable to stop api server on restart: %v", err) + logger.Warn(ctx, "unable to stop api server on restart", "error", err) } helper.startAPIServer() - glog.V(10).Info("Test API server restarted") + logger.Debug(ctx, "Test API server restarted") } func (helper *Helper) RestartMetricsServer() { + ctx := context.Background() helper.stopMetricsServer() helper.startMetricsServer() - glog.V(10).Info("Test metrics server 
restarted") + logger.Debug(ctx, "Test metrics server restarted") } func (helper *Helper) Reset() { - glog.Infof("Reseting testing environment") + ctx := context.Background() + logger.Info(ctx, "Resetting testing environment") env := environments.Environment() // Reset the configuration env.Config = config.NewApplicationConfig() @@ -202,13 +218,15 @@ // Also on reset, we don't care to be re-defining 'v' and other glog flags flagset := pflag.NewFlagSet(helper.NewID(), pflag.ContinueOnError) if err := env.AddFlags(flagset); err != nil { - glog.Fatalf("Unable to add environment flags on Reset: %s", err.Error()) + logger.Error(ctx, "Unable to add environment flags on Reset", "error", err) + os.Exit(1) } pflag.Parse() err := env.Initialize() if err != nil { - glog.Fatalf("Unable to reset testing environment: %s", err.Error()) + logger.Error(ctx, "Unable to reset testing environment", "error", err) + os.Exit(1) } helper.AppConfig = env.Config helper.RestartServer() @@ -522,3 +540,16 @@ ZzJoM2lMdlV0d2NObnkwaFJLc3RjNzNKbDl4UjNxWGZYS0pIMFRoVGwKcTBncQotLS0tLUVORCBD RVJUSUZJQ0FURS0tLS0tCg==` return base64.StdEncoding.DecodeString(s) } + +// initTestLogger initializes a default logger for tests +func initTestLogger() { + cfg := &logger.LogConfig{ + Level: slog.LevelInfo, + Format: logger.FormatText, // Use text format for test readability + Output: os.Stdout, + Component: "hyperfleet-api-test", + Version: "test", + Hostname: "test-host", + } + logger.InitGlobalLogger(cfg) +} diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go index d0b68ff..17e45fb 100755 --- a/test/integration/integration_test.go +++ b/test/integration/integration_test.go @@ -1,20 +1,21 @@ package integration import ( + "context" "flag" "os" "path/filepath" "runtime" "testing" - "github.com/golang/glog" - + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" "github.com/openshift-hyperfleet/hyperfleet-api/test" ) func TestMain(m
*testing.M) { flag.Parse() - glog.Infof("Starting integration test using go version %s", runtime.Version()) + ctx := context.Background() + logger.Infof(ctx, "Starting integration test using go version %s", runtime.Version()) // Set OPENAPI_SCHEMA_PATH for integration tests if not already set // This enables schema validation middleware during tests @@ -23,7 +24,7 @@ func TestMain(m *testing.M) { // Use runtime.Caller to find this file's path _, filename, _, ok := runtime.Caller(0) if !ok { - glog.Warningf("Failed to determine current file path via runtime.Caller, skipping OPENAPI_SCHEMA_PATH setup") + logger.Warn(ctx, "Failed to determine current file path via runtime.Caller, skipping OPENAPI_SCHEMA_PATH setup") } else { // filename is like: /path/to/repo/test/integration/integration_test.go // Navigate up: integration_test.go -> integration -> test -> repo @@ -36,10 +37,10 @@ func TestMain(m *testing.M) { // Verify the schema file exists before setting the env var if _, err := os.Stat(schemaPath); err != nil { - glog.Warningf("Schema file not found at %s: %v, skipping OPENAPI_SCHEMA_PATH setup", schemaPath, err) + logger.Warn(ctx, "Schema file not found, skipping OPENAPI_SCHEMA_PATH setup", "schema_path", schemaPath, "error", err) } else { _ = os.Setenv("OPENAPI_SCHEMA_PATH", schemaPath) - glog.Infof("Set OPENAPI_SCHEMA_PATH=%s for integration tests", schemaPath) + logger.Infof(ctx, "Set OPENAPI_SCHEMA_PATH=%s for integration tests", schemaPath) } } }