Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 3 additions & 1 deletion .github/workflows/component-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ on:
branches:
- feat/signature-verification
- feat/tamperalert
- feat/tamper-detection
workflow_dispatch:
inputs:
build_image:
Expand Down Expand Up @@ -203,7 +204,8 @@ jobs:
Test_27_ApplicationProfileOpens,
Test_28_UserDefinedNetworkNeighborhood,
Test_29_SignedApplicationProfile,
Test_30_TamperedSignedProfiles
Test_30_TamperedSignedProfiles,
Test_31_TamperDetectionAlert
]
steps:
- name: Checkout code
Expand Down
4 changes: 2 additions & 2 deletions cmd/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -294,10 +294,10 @@ func main() {
ruleBindingNotify = make(chan rulebinding.RuleBindingNotify, 100)
ruleBindingCache.AddNotifier(&ruleBindingNotify)

apc := applicationprofilecache.NewApplicationProfileCache(cfg, storageClient, k8sObjectCache)
apc := applicationprofilecache.NewApplicationProfileCache(cfg, storageClient, k8sObjectCache, exporter)
apc.Start(ctx)

nnc := networkneighborhoodcache.NewNetworkNeighborhoodCache(cfg, storageClient, k8sObjectCache)
nnc := networkneighborhoodcache.NewNetworkNeighborhoodCache(cfg, storageClient, k8sObjectCache, exporter)
nnc.Start(ctx)

dc := dnscache.NewDnsCache(dnsResolver)
Expand Down
128 changes: 99 additions & 29 deletions pkg/objectcache/applicationprofilecache/applicationprofilecache.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ package applicationprofilecache

import (
"context"
"errors"
"fmt"
"strings"
"sync"
Expand All @@ -15,10 +14,13 @@ import (
"github.com/kubescape/go-logger"
"github.com/kubescape/go-logger/helpers"
helpersv1 "github.com/kubescape/k8s-interface/instanceidhandler/v1/helpers"
"github.com/armosec/armoapi-go/armotypes"
"github.com/kubescape/node-agent/pkg/config"
"github.com/kubescape/node-agent/pkg/exporters"
"github.com/kubescape/node-agent/pkg/objectcache"
"github.com/kubescape/node-agent/pkg/objectcache/applicationprofilecache/callstackcache"
"github.com/kubescape/node-agent/pkg/resourcelocks"
"github.com/kubescape/node-agent/pkg/rulemanager/types"
"github.com/kubescape/node-agent/pkg/signature"
"github.com/kubescape/node-agent/pkg/signature/profiles"
"github.com/kubescape/node-agent/pkg/storage"
Expand Down Expand Up @@ -51,14 +53,15 @@ type ApplicationProfileCacheImpl struct {
containerToCallStackIndex maps.SafeMap[string, *ContainerCallStackIndex]
storageClient storage.ProfileClient
k8sObjectCache objectcache.K8sObjectCache
exporter exporters.Exporter // Exporter for sending tamper detection alerts
updateInterval time.Duration
updateInProgress bool // Flag to track if update is in progress
updateMutex sync.Mutex // Mutex to protect the flag
containerLocks *resourcelocks.ResourceLocks // Locks for each container to prevent concurrent modifications
}

// NewApplicationProfileCache creates a new application profile cache with periodic updates
func NewApplicationProfileCache(cfg config.Config, storageClient storage.ProfileClient, k8sObjectCache objectcache.K8sObjectCache) *ApplicationProfileCacheImpl {
func NewApplicationProfileCache(cfg config.Config, storageClient storage.ProfileClient, k8sObjectCache objectcache.K8sObjectCache, exporter exporters.Exporter) *ApplicationProfileCacheImpl {
updateInterval := utils.AddJitter(cfg.ProfilesCacheRefreshRate, 10) // Add 10% jitter to avoid high load on the storage

apc := &ApplicationProfileCacheImpl{
Expand All @@ -70,6 +73,7 @@ func NewApplicationProfileCache(cfg config.Config, storageClient storage.Profile
containerToCallStackIndex: maps.SafeMap[string, *ContainerCallStackIndex]{},
storageClient: storageClient,
k8sObjectCache: k8sObjectCache,
exporter: exporter,
updateInterval: updateInterval,
containerLocks: resourcelocks.New(),
}
Expand Down Expand Up @@ -274,42 +278,101 @@ func (apc *ApplicationProfileCacheImpl) updateAllProfiles(ctx context.Context) {
}
}

// verifyApplicationProfile verifies the profile signature if verification is enabled.
// Returns error if verification fails, nil otherwise (including when verification is disabled).
// Also updates profileState with error details if verification fails.
// verifyApplicationProfile verifies the profile signature.
// Always checks signed profiles for tamper (emits R1016 alert on tamper).
// When EnableSignatureVerification is true, also rejects tampered/unsigned profiles.
// Returns error if the profile should not be loaded, nil otherwise.
func (apc *ApplicationProfileCacheImpl) verifyApplicationProfile(profile *v1beta1.ApplicationProfile, workloadID, context string, recordFailure bool) error {
if !apc.cfg.EnableSignatureVerification {
return nil
}
profileAdapter := profiles.NewApplicationProfileAdapter(profile)
if err := signature.VerifyObject(profileAdapter); err != nil {
// Only warn if signature exists but doesn't match; missing signatures are debug
if errors.Is(err, signature.ErrObjectNotSigned) {
logger.L().Debug(context+" is not signed, skipping",
helpers.String("profile", profile.Name),
helpers.String("namespace", profile.Namespace),
helpers.String("workloadID", workloadID))
} else {
logger.L().Warning(context+" signature verification failed, skipping",

// Always check signed profiles for tamper, regardless of enforcement setting
if signature.IsSigned(profileAdapter) {
if err := signature.VerifyObject(profileAdapter); err != nil {
// Signed profile failed verification → tamper detected
logger.L().Warning(context+" signature verification failed (tamper detected)",
helpers.String("profile", profile.Name),
helpers.String("namespace", profile.Namespace),
helpers.String("workloadID", workloadID),
helpers.Error(err))

// Emit R1016 tamper alert
apc.emitTamperAlert(profile.Name, profile.Namespace, workloadID, "ApplicationProfile", err)

if apc.cfg.EnableSignatureVerification {
if recordFailure {
apc.setVerificationFailed(workloadID, profile.Name, err)
}
return err
}
// Enforcement off: allow loading despite tamper
return nil
}
logger.L().Debug(context+" verification successful",
helpers.String("profile", profile.Name),
helpers.String("namespace", profile.Namespace))
return nil
}

// Update profile state with verification error
// Profile is not signed
if apc.cfg.EnableSignatureVerification {
logger.L().Debug(context+" is not signed, skipping",
helpers.String("profile", profile.Name),
helpers.String("namespace", profile.Namespace),
helpers.String("workloadID", workloadID))
if recordFailure {
apc.setVerificationFailed(workloadID, profile.Name, err)
apc.setVerificationFailed(workloadID, profile.Name, signature.ErrObjectNotSigned)
}

return err
return signature.ErrObjectNotSigned
}
logger.L().Debug(context+" verification successful",
helpers.String("profile", profile.Name),
helpers.String("namespace", profile.Namespace))

return nil
}

// emitTamperAlert sends an R1016 "Signed profile tampered" alert via the exporter.
// It is a no-op when no exporter was wired in (e.g. in tests).
func (apc *ApplicationProfileCacheImpl) emitTamperAlert(profileName, namespace, workloadID, objectKind string, verifyErr error) {
	if apc.exporter == nil {
		return
	}

	// The alert is synthesized by the node-agent itself rather than observed
	// on a workload process, hence the fixed PID 1 / "node-agent" process info.
	base := armotypes.BaseRuntimeAlert{
		AlertName:      "Signed profile tampered",
		InfectedPID:    1,
		Severity:       10,
		FixSuggestions: fmt.Sprintf("Investigate who modified the %s '%s' in namespace '%s'. Re-sign the profile after verifying its contents.", objectKind, profileName, namespace),
	}
	processDetails := armotypes.ProcessTree{
		ProcessTree: armotypes.Process{
			PID:  1,
			Comm: "node-agent",
		},
	}
	ruleDetails := armotypes.RuleAlert{
		RuleDescription: fmt.Sprintf("Signed %s '%s' in namespace '%s' has been tampered with: %v", objectKind, profileName, namespace, verifyErr),
	}

	ruleFailure := &types.GenericRuleFailure{
		BaseRuntimeAlert:       base,
		AlertType:              armotypes.AlertTypeRule,
		RuntimeProcessDetails:  processDetails,
		RuleAlert:              ruleDetails,
		RuntimeAlertK8sDetails: armotypes.RuntimeAlertK8sDetails{Namespace: namespace},
		RuleID:                 "R1016",
	}

	// Populate workload details from workloadID if available.
	ruleFailure.SetWorkloadDetails(extractWlidFromWorkloadID(workloadID))

	apc.exporter.SendRuleAlert(ruleFailure)
}

// extractWlidFromWorkloadID extracts the wlid part from a "wlid/templateHash" key.
// The workloadID format is "wlid://<cluster>/<namespace>/<kind>/<name>/<templateHash>";
// everything before the last "/" (i.e. the wlid without the templateHash) is returned.
// If there is no "/" past the first character, the input is returned unchanged.
func extractWlidFromWorkloadID(workloadID string) string {
	slash := strings.LastIndex(workloadID, "/")
	if slash <= 0 {
		return workloadID
	}
	return workloadID[:slash]
}

func (apc *ApplicationProfileCacheImpl) setVerificationFailed(workloadID, profileName string, err error) {
profileState := &objectcache.ProfileState{
Completion: "failed",
Expand Down Expand Up @@ -557,7 +620,9 @@ func (apc *ApplicationProfileCacheImpl) addContainer(container *containercollect
}
}
} else {
apc.workloadIDToProfileState.Set(workloadID, nil)
apc.workloadIDToProfileState.Set(workloadID, &objectcache.ProfileState{
Error: fmt.Errorf("waiting for profile update"),
})
}

// Create container info
Expand Down Expand Up @@ -607,6 +672,12 @@ func (apc *ApplicationProfileCacheImpl) addContainer(container *containercollect

// Update the profile in the cache
apc.workloadIDToProfile.Set(workloadID, fullProfile)
profileState := &objectcache.ProfileState{
Completion: fullProfile.Annotations[helpersv1.CompletionMetadataKey],
Status: fullProfile.Annotations[helpersv1.StatusMetadataKey],
Name: fullProfile.Name,
}
apc.workloadIDToProfileState.Set(workloadID, profileState)
logger.L().Debug("added user-defined profile to cache",
helpers.String("containerID", containerID),
helpers.String("workloadID", workloadID),
Expand Down Expand Up @@ -782,10 +853,9 @@ func (apc *ApplicationProfileCacheImpl) GetApplicationProfileState(containerID s
if profileState, exists := apc.workloadIDToProfileState.Load(workloadID); exists {
if profileState != nil {
return profileState
} else {
return &objectcache.ProfileState{
Error: fmt.Errorf("profile state not available - shouldn't happen"),
}
}
return &objectcache.ProfileState{
Error: fmt.Errorf("application profile state is nil for workload %s", workloadID),
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ func TestPagination(t *testing.T) {
spy := &SpyProfileClient{Profiles: profiles}

// mock k8s object cache is irrelevant since we inject container info directly
cache := NewApplicationProfileCache(config.Config{}, spy, nil)
cache := NewApplicationProfileCache(config.Config{}, spy, nil, nil)

// Inject a container so that "default" namespace is processed.
// The WorkloadID needs to match something if we want deeper logic to run,
Expand Down
Loading
Loading