diff --git a/cmd/activator/main.go b/cmd/activator/main.go index 3d2633023d38..b3e561a1de43 100644 --- a/cmd/activator/main.go +++ b/cmd/activator/main.go @@ -19,6 +19,7 @@ package main import ( "context" "crypto/tls" + "crypto/x509" "errors" "fmt" "log" @@ -153,6 +154,36 @@ func main() { logger.Fatalw("Failed to construct network config", zap.Error(err)) } + // Enable TLS against queue-proxy when the CA and SAN are specified. + tlsEnabled := networkConfig.QueueProxyCA != "" && networkConfig.QueueProxySAN != "" + + // Enable TLS client when queue-proxy-ca is specified. + // At this moment activator with TLS does not disable HTTP. + // See also https://github.com/knative/serving/issues/12808. + if tlsEnabled { + caSecret, err := kubeClient.CoreV1().Secrets(system.Namespace()).Get(ctx, networkConfig.QueueProxyCA, metav1.GetOptions{}) + if err != nil { + logger.Fatalw("Failed to get secret", zap.Error(err)) + } + + pool, err := x509.SystemCertPool() + if err != nil { + pool = x509.NewCertPool() + } + + if ok := pool.AppendCertsFromPEM(caSecret.Data["ca.crt"]); !ok { + logger.Fatalw("Failed to append ca cert to the RootCAs") + } + + tlsConf := &tls.Config{ + RootCAs: pool, + InsecureSkipVerify: false, + ServerName: networkConfig.QueueProxySAN, + MinVersion: tls.VersionTLS12, + } + transport = pkgnet.NewProxyAutoTLSTransport(env.MaxIdleProxyConns, env.MaxIdleProxyConnsPerHost, tlsConf) + } + // Start throttler. throttler := activatornet.NewThrottler(ctx, env.PodIP) go throttler.Run(ctx, transport, networkConfig.EnableMeshPodAddressability, networkConfig.MeshCompatibilityMode) @@ -188,7 +219,7 @@ func main() { // Create activation handler chain // Note: innermost handlers are specified first, ie. 
the last handler in the chain will be executed first - ah := activatorhandler.New(ctx, throttler, transport, networkConfig.EnableMeshPodAddressability, logger) + ah := activatorhandler.New(ctx, throttler, transport, networkConfig.EnableMeshPodAddressability, logger, tlsEnabled) ah = concurrencyReporter.Handler(ah) ah = activatorhandler.NewTracingHandler(ah) reqLogHandler, err := pkghttp.NewRequestLogHandler(ah, logging.NewSyncFileWriter(os.Stdout), "", diff --git a/cmd/queue/main.go b/cmd/queue/main.go index 40a517d762be..a896672ca31c 100644 --- a/cmd/queue/main.go +++ b/cmd/queue/main.go @@ -62,11 +62,18 @@ const ( // This is to give networking a little bit more time to remove the pod // from its configuration and propagate that to all loadbalancers and nodes. drainSleepDuration = 30 * time.Second + + // certPath is the path for the server certificate mounted by queue-proxy. + certPath = queue.CertDirectory + "/tls.crt" + + // keyPath is the path for the server certificate key mounted by queue-proxy. + keyPath = queue.CertDirectory + "/tls.key" ) type config struct { ContainerConcurrency int `split_words:"true" required:"true"` QueueServingPort string `split_words:"true" required:"true"` + QueueServingTLSPort string `split_words:"true" required:"true"` UserPort string `split_words:"true" required:"true"` RevisionTimeoutSeconds int `split_words:"true" required:"true"` MaxDurationSeconds int `split_words:"true"` // optional @@ -162,18 +169,37 @@ func main() { if env.ConcurrencyStateEndpoint != "" { concurrencyendpoint = queue.NewConcurrencyEndpoint(env.ConcurrencyStateEndpoint, env.ConcurrencyStateTokenPath) } - mainServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint) - servers := map[string]*http.Server{ + + // Enable TLS when certificate is mounted. 
+ tlsEnabled := exists(logger, certPath) && exists(logger, keyPath) + + mainServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint, false) + httpServers := map[string]*http.Server{ "main": mainServer, - "admin": buildAdminServer(logger, drain), "metrics": buildMetricsServer(promStatReporter, protoStatReporter), + "admin": buildAdminServer(logger, drain), } if env.EnableProfiling { - servers["profile"] = profiling.NewServer(profiling.NewHandler(logger, true)) + httpServers["profile"] = profiling.NewServer(profiling.NewHandler(logger, true)) + } + + // Enable TLS server when activator server certs are mounted. + // At this moment activator with TLS does not disable HTTP. + // See also https://github.com/knative/serving/issues/12808. + var tlsServers map[string]*http.Server + if tlsEnabled { + mainTLSServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint, true /* enable TLS */) + tlsServers = map[string]*http.Server{ + "tlsMain": mainTLSServer, + "tlsAdmin": buildAdminServer(logger, drain), + } + // Drop the admin HTTP server as we use TLS for the admin server. + // TODO: The drain created with mainServer above is lost. Unify the two drains. + delete(httpServers, "admin") } errCh := make(chan error) - for name, server := range servers { + for name, server := range httpServers { go func(name string, s *http.Server) { // Don't forward ErrServerClosed as that indicates we're already shutting down. if err := s.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { @@ -181,6 +207,14 @@ } }(name, server) } + for name, server := range tlsServers { + go func(name string, s *http.Server) { + // Don't forward ErrServerClosed as that indicates we're already shutting down. 
+ if err := s.ListenAndServeTLS(certPath, keyPath); err != nil && !errors.Is(err, http.ErrServerClosed) { + errCh <- fmt.Errorf("%s server failed to serve: %w", name, err) + } + }(name, server) + } // Blocks until we actually receive a TERM signal or one of the servers // exits unexpectedly. We fold both signals together because we only want @@ -200,9 +234,9 @@ func main() { drain() // Removing the main server from the shutdown logic as we've already shut it down. - delete(servers, "main") + delete(httpServers, "main") - for serverName, srv := range servers { + for serverName, srv := range httpServers { logger.Info("Shutting down server: ", serverName) if err := srv.Shutdown(context.Background()); err != nil { logger.Errorw("Failed to shutdown server", zap.String("server", serverName), zap.Error(err)) @@ -212,6 +246,14 @@ } } +func exists(logger *zap.SugaredLogger, filename string) bool { + _, err := os.Stat(filename) + if err != nil && !os.IsNotExist(err) { + logger.Fatalw(fmt.Sprintf("Failed to verify the file path %q", filename), zap.Error(err)) + } + return err == nil +} + func buildProbe(logger *zap.SugaredLogger, encodedProbe string, autodetectHTTP2 bool) *readiness.Probe { coreProbe, err := readiness.DecodeProbe(encodedProbe) if err != nil { @@ -224,18 +266,20 @@ func buildProbe(logger *zap.SugaredLogger, encodedProbe string, autodetectHTTP2 } func buildServer(ctx context.Context, env config, probeContainer func() bool, stats *network.RequestStats, logger *zap.SugaredLogger, - ce *queue.ConcurrencyEndpoint) (server *http.Server, drain func()) { + ce *queue.ConcurrencyEndpoint, enableTLS bool) (server *http.Server, drain func()) { + // TODO: If TLS is enabled, execute probes twice and track two different sets of container health. 
target := net.JoinHostPort("127.0.0.1", env.UserPort) - httpProxy := pkghttp.NewHeaderPruningReverseProxy(target, pkghttp.NoHostOverride, activator.RevisionHeaders) + httpProxy := pkghttp.NewHeaderPruningReverseProxy(target, pkghttp.NoHostOverride, activator.RevisionHeaders, false /* use HTTP */) httpProxy.Transport = buildTransport(env, logger) httpProxy.ErrorHandler = pkghandler.Error(logger) httpProxy.BufferPool = network.NewBufferPool() httpProxy.FlushInterval = network.FlushInterval + // TODO: During HTTP and HTTPS transition, counting concurrency could not be accurate. Count accurately. breaker := buildBreaker(logger, env) - metricsSupported := supportsMetrics(ctx, logger, env) + metricsSupported := supportsMetrics(ctx, logger, env, enableTLS) tracingEnabled := env.TracingConfigBackend != tracingconfig.None concurrencyStateEnabled := env.ConcurrencyStateEndpoint != "" firstByteTimeout := time.Duration(env.RevisionTimeoutSeconds) * time.Second @@ -287,6 +331,10 @@ func buildServer(ctx context.Context, env config, probeContainer func() bool, st composedHandler = requestLogHandler(logger, composedHandler, env) } + if enableTLS { + return pkgnet.NewServer(":"+env.QueueServingTLSPort, composedHandler), drainer.Drain + } + return pkgnet.NewServer(":"+env.QueueServingPort, composedHandler), drainer.Drain } @@ -333,12 +381,15 @@ func buildBreaker(logger *zap.SugaredLogger, env config) *queue.Breaker { return queue.NewBreaker(params) } -func supportsMetrics(ctx context.Context, logger *zap.SugaredLogger, env config) bool { +func supportsMetrics(ctx context.Context, logger *zap.SugaredLogger, env config, enableTLS bool) bool { + // Keep it on HTTP because Metrics needs to be registered on either TLS server or non-TLS server. + if enableTLS { + return false + } // Setup request metrics reporting for end-user metrics. 
if env.ServingRequestMetricsBackend == "" { return false } - if err := setupMetricsExporter(ctx, logger, env.ServingRequestMetricsBackend, env.MetricsCollectorAddress); err != nil { logger.Errorw("Error setting up request metrics exporter. Request metrics will be unavailable.", zap.Error(err)) return false diff --git a/pkg/activator/handler/handler.go b/pkg/activator/handler/handler.go index 85e72261e178..c4183ae504da 100644 --- a/pkg/activator/handler/handler.go +++ b/pkg/activator/handler/handler.go @@ -21,6 +21,8 @@ import ( "errors" "net/http" "net/http/httputil" + "strconv" + "strings" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/trace" @@ -35,6 +37,7 @@ import ( "knative.dev/serving/pkg/activator" activatorconfig "knative.dev/serving/pkg/activator/config" pkghttp "knative.dev/serving/pkg/http" + "knative.dev/serving/pkg/networking" "knative.dev/serving/pkg/queue" "knative.dev/serving/pkg/reconciler/serverlessservice/resources/names" ) @@ -53,10 +56,11 @@ type activationHandler struct { throttler Throttler bufferPool httputil.BufferPool logger *zap.SugaredLogger + tls bool } // New constructs a new http.Handler that deals with revision activation. -func New(_ context.Context, t Throttler, transport http.RoundTripper, usePassthroughLb bool, logger *zap.SugaredLogger) http.Handler { +func New(_ context.Context, t Throttler, transport http.RoundTripper, usePassthroughLb bool, logger *zap.SugaredLogger, tlsEnabled bool) http.Handler { return &activationHandler{ transport: transport, tracingTransport: &ochttp.Transport{ @@ -67,6 +71,7 @@ func New(_ context.Context, t Throttler, transport http.RoundTripper, usePassthr throttler: t, bufferPool: network.NewBufferPool(), logger: logger, + tls: tlsEnabled, } } @@ -116,7 +121,14 @@ func (a *activationHandler) proxyRequest(revID types.NamespacedName, w http.Resp if usePassthroughLb { hostOverride = names.PrivateService(revID.Name) + "." 
+ revID.Namespace } - proxy := pkghttp.NewHeaderPruningReverseProxy(target, hostOverride, activator.RevisionHeaders) + + var proxy *httputil.ReverseProxy + if a.tls { + proxy = pkghttp.NewHeaderPruningReverseProxy(useSecurePort(target), hostOverride, activator.RevisionHeaders, true /* use HTTPS */) + } else { + proxy = pkghttp.NewHeaderPruningReverseProxy(target, hostOverride, activator.RevisionHeaders, false /* use HTTP */) + } + + proxy.BufferPool = a.bufferPool + proxy.Transport = a.transport + if tracingEnabled { @@ -129,3 +141,11 @@ func (a *activationHandler) proxyRequest(revID types.NamespacedName, w http.Resp proxy.ServeHTTP(w, r) } + +// useSecurePort replaces the default port with HTTPS port (8112). +// TODO: endpointsToDests() should support HTTPS instead of this overwrite but it needs metadata request to be encrypted. +// This code should be removed when https://github.com/knative/serving/issues/12821 is solved. +func useSecurePort(target string) string { + target = strings.Split(target, ":")[0] + return target + ":" + strconv.Itoa(networking.BackendHTTPSPort) +} diff --git a/pkg/activator/handler/handler_test.go b/pkg/activator/handler/handler_test.go index 33ef6a571d6a..2481675391d6 100644 --- a/pkg/activator/handler/handler_test.go +++ b/pkg/activator/handler/handler_test.go @@ -123,7 +123,7 @@ func TestActivationHandler(t *testing.T) { ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) defer cancel() - handler := New(ctx, test.throttler, rt, false /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, test.throttler, rt, false /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) resp := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) @@ -162,7 +162,7 @@ func TestActivationHandlerProxyHeader(t *testing.T) { ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) defer cancel() - handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx)) + 
handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) writer := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) @@ -195,7 +195,7 @@ func TestActivationHandlerPassthroughLb(t *testing.T) { ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) defer cancel() - handler := New(ctx, fakeThrottler{}, rt, true /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, fakeThrottler{}, rt, true /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) writer := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) @@ -276,7 +276,7 @@ func TestActivationHandlerTraceSpans(t *testing.T) { oct.Finish() }() - handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) // Set up config store to populate context. configStore := setupConfigStore(t, logging.FromContext(ctx)) @@ -345,7 +345,7 @@ func BenchmarkHandler(b *testing.B) { }, nil }) - handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) request := func() *http.Request { req := httptest.NewRequest(http.MethodGet, "http://example.com", nil) diff --git a/pkg/activator/handler/main_test.go b/pkg/activator/handler/main_test.go index f8fe4edfea75..98b44b244413 100644 --- a/pkg/activator/handler/main_test.go +++ b/pkg/activator/handler/main_test.go @@ -69,7 +69,7 @@ func BenchmarkHandlerChain(b *testing.B) { }) // Make sure to update this if the activator's main file changes. 
- ah := New(ctx, fakeThrottler{}, rt, false, logger) + ah := New(ctx, fakeThrottler{}, rt, false, logger, false /* TLS */) ah = concurrencyReporter.Handler(ah) ah = NewTracingHandler(ah) ah, _ = pkghttp.NewRequestLogHandler(ah, io.Discard, "", nil, false) diff --git a/pkg/http/proxy.go b/pkg/http/proxy.go index 81805cedbde5..d5c252c37be5 100644 --- a/pkg/http/proxy.go +++ b/pkg/http/proxy.go @@ -33,10 +33,14 @@ const NoHostOverride = "" // If hostOverride is not an empty string, the outgoing request's Host header will be // replaced with that explicit value and the passthrough loadbalancing header will be // set to enable pod-addressability. -func NewHeaderPruningReverseProxy(target, hostOverride string, headersToRemove []string) *httputil.ReverseProxy { +func NewHeaderPruningReverseProxy(target, hostOverride string, headersToRemove []string, useHTTPS bool) *httputil.ReverseProxy { return &httputil.ReverseProxy{ Director: func(req *http.Request) { - req.URL.Scheme = "http" + if useHTTPS { + req.URL.Scheme = "https" + } else { + req.URL.Scheme = "http" + } req.URL.Host = target if hostOverride != NoHostOverride { diff --git a/pkg/http/proxy_test.go b/pkg/http/proxy_test.go index a5e40bd3afac..83b74300b37e 100644 --- a/pkg/http/proxy_test.go +++ b/pkg/http/proxy_test.go @@ -17,6 +17,8 @@ limitations under the License. 
package http import ( + "crypto/tls" + "crypto/x509" "encoding/json" "net/http" "net/http/httptest" @@ -89,7 +91,7 @@ func TestNewHeaderPruningProxy(t *testing.T) { proxy := NewHeaderPruningReverseProxy(serverURL.Host, test.host, []string{ "header-to-remove-1", "header-to-remove-2", - }) + }, false /* use HTTPS */) resp := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, test.url, nil) @@ -113,3 +115,96 @@ func TestNewHeaderPruningProxy(t *testing.T) { }) } } + +func TestNewHeaderPruningProxyHTTPS(t *testing.T) { + var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { + r.Header.Add("Host", r.Host) // Explicitly add the host header so we can assert. + if err := json.NewEncoder(w).Encode(r.Header); err != nil { + panic(err) + } + } + + server := httptest.NewTLSServer(handler) + serverURL, _ := url.Parse(server.URL) + defer server.Close() + + rootCAs := x509.NewCertPool() + rootCAs.AddCert(server.Certificate()) + tlsConf := &tls.Config{ + MinVersion: tls.VersionTLS12, + RootCAs: rootCAs, + } + + tests := []struct { + name string + url string + host string + header http.Header + expectHeaders http.Header + }{{ + name: "prunes activator headers, does not add user agent header", + url: "https://example.com/", + header: http.Header{ + "Header-Not-To-Remove": []string{"value"}, + "Header-To-Remove-1": []string{"some-value"}, + "Header-To-Remove-2": []string{"some-value"}, + }, + expectHeaders: http.Header{ + "Host": []string{"example.com"}, + "Header-Not-To-Remove": []string{"value"}, + }, + }, { + name: "explicit user agent header not removed", + url: "https://example.com/", + header: http.Header{ + network.UserAgentKey: []string{"gold"}, + }, + expectHeaders: http.Header{ + "Host": []string{"example.com"}, + network.UserAgentKey: []string{"gold"}, + }, + }, { + name: "overrides host header", + url: "https://example.com/", + host: "foo.bar", + header: http.Header{ + network.UserAgentKey: []string{"gold"}, + }, + expectHeaders: 
http.Header{ + "Host": []string{"foo.bar"}, + networking.PassthroughLoadbalancingHeaderName: []string{"true"}, + network.UserAgentKey: []string{"gold"}, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + proxy := NewHeaderPruningReverseProxy(serverURL.Host, test.host, []string{ + "header-to-remove-1", + "header-to-remove-2", + }, true /* use HTTPS */) + + resp := httptest.NewRecorder() + + req := httptest.NewRequest(http.MethodPost, test.url, nil) + req.Header = test.header + + proxy.Transport = &http.Transport{TLSClientConfig: tlsConf} + proxy.ServeHTTP(resp, req) + + var proxiedHeaders http.Header + if err := json.NewDecoder(resp.Body).Decode(&proxiedHeaders); err != nil { + t.Fatalf("Decode = %v", err) + } + + // Remove headers golang adds from consideration. + for _, k := range []string{"Accept-Encoding", "Content-Length", "X-Forwarded-For"} { + proxiedHeaders.Del(k) + } + + if got, want := proxiedHeaders, test.expectHeaders; !cmp.Equal(want, got) { + t.Errorf("Got Headers=%v, want: %v; diff: %s", got, want, cmp.Diff(want, got)) + } + }) + } +} diff --git a/pkg/queue/constants.go b/pkg/queue/constants.go index 4d6b6f0573b0..4d08db5c1a86 100644 --- a/pkg/queue/constants.go +++ b/pkg/queue/constants.go @@ -26,4 +26,7 @@ const ( // Main usage is to delay the termination of user-container until all // accepted requests have been processed. RequestQueueDrainPath = "/wait-for-drain" + + // CertDirectory is the name of the directory path where certificates are stored. 
+ CertDirectory = "/var/lib/knative/certs" ) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 997fc9715ad4..4a58e1b527b3 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -70,6 +70,12 @@ var ( }, } + certVolumeMount = corev1.VolumeMount{ + MountPath: queue.CertDirectory, + Name: "server-certs", + ReadOnly: true, + } + varTokenVolumeMount = corev1.VolumeMount{ Name: varTokenVolume.Name, MountPath: concurrencyStateTokenVolumeMountPath, @@ -89,6 +95,17 @@ var ( } ) +func certVolume(secret string) corev1.Volume { + return corev1.Volume{ + Name: "server-certs", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret, + }, + }, + } +} + func rewriteUserProbe(p *corev1.Probe, userPort int) { if p == nil { return @@ -122,6 +139,11 @@ func makePodSpec(rev *v1.Revision, cfg *config.Config) (*corev1.PodSpec, error) extraVolumes = append(extraVolumes, varTokenVolume) } + if cfg.Network.QueueProxyCertSecret != "" { + queueContainer.VolumeMounts = append(queueContainer.VolumeMounts, certVolumeMount) + extraVolumes = append(extraVolumes, certVolume(cfg.Network.QueueProxyCertSecret)) + } + podSpec := BuildPodSpec(rev, append(BuildUserContainers(rev), *queueContainer), cfg) podSpec.Volumes = append(podSpec.Volumes, extraVolumes...) 
diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 67ed5fe3d62d..f0a8a8dedd68 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -75,7 +75,7 @@ var ( defaultQueueContainer = &corev1.Container{ Name: QueueContainerName, Resources: createQueueResources(&deploymentConfig, make(map[string]string), &corev1.Container{}), - Ports: append(queueNonServingPorts, queueHTTPPort), + Ports: append(queueNonServingPorts, queueHTTPPort, queueHTTPSPort), ReadinessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ -103,6 +103,9 @@ var ( }, { Name: "QUEUE_SERVING_PORT", Value: "8012", + }, { + Name: "QUEUE_SERVING_TLS_PORT", + Value: "8112", }, { Name: "CONTAINER_CONCURRENCY", Value: "0", diff --git a/pkg/reconciler/revision/resources/queue.go b/pkg/reconciler/revision/resources/queue.go index d5fe68df1918..675ed65597f1 100644 --- a/pkg/reconciler/revision/resources/queue.go +++ b/pkg/reconciler/revision/resources/queue.go @@ -44,9 +44,10 @@ import ( ) const ( - localAddress = "127.0.0.1" - requestQueueHTTPPortName = "queue-port" - profilingPortName = "profiling-port" + localAddress = "127.0.0.1" + requestQueueHTTPPortName = "queue-port" + requestQueueHTTPSPortName = "https-port" // must be no more than 15 characters. + profilingPortName = "profiling-port" ) var ( @@ -58,6 +59,10 @@ var ( Name: requestQueueHTTPPortName, ContainerPort: networking.BackendHTTP2Port, } + queueHTTPSPort = corev1.ContainerPort{ + Name: requestQueueHTTPSPortName, + ContainerPort: networking.BackendHTTPSPort, + } queueNonServingPorts = []corev1.ContainerPort{{ // Provides health checks and lifecycle hooks. 
Name: v1.QueueAdminPortName, @@ -202,7 +207,7 @@ func makeQueueContainer(rev *v1.Revision, cfg *config.Config) (*corev1.Container if rev.GetProtocol() == pkgnet.ProtocolH2C { servingPort = queueHTTP2Port } - ports = append(ports, servingPort) + ports = append(ports, servingPort, queueHTTPSPort) container := rev.Spec.GetContainer() @@ -269,6 +274,9 @@ func makeQueueContainer(rev *v1.Revision, cfg *config.Config) (*corev1.Container }, { Name: "QUEUE_SERVING_PORT", Value: strconv.Itoa(int(servingPort.ContainerPort)), + }, { + Name: "QUEUE_SERVING_TLS_PORT", + Value: strconv.Itoa(int(queueHTTPSPort.ContainerPort)), }, { Name: "CONTAINER_CONCURRENCY", Value: strconv.Itoa(int(rev.Spec.GetContainerConcurrency())), diff --git a/pkg/reconciler/revision/resources/queue_test.go b/pkg/reconciler/revision/resources/queue_test.go index 7066f0b41c2a..9e82f6eaf708 100644 --- a/pkg/reconciler/revision/resources/queue_test.go +++ b/pkg/reconciler/revision/resources/queue_test.go @@ -124,7 +124,7 @@ func TestMakeQueueContainer(t *testing.T) { }, want: queueContainer(func(c *corev1.Container) { c.Image = "alpine" - c.Ports = append(queueNonServingPorts, queueHTTP2Port) + c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) c.ReadinessProbe.ProbeHandler.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort c.Env = env(map[string]string{ "USER_PORT": "1955", @@ -147,7 +147,7 @@ func TestMakeQueueContainer(t *testing.T) { }, want: queueContainer(func(c *corev1.Container) { c.Image = "alpine" - c.Ports = append(queueNonServingPorts, queueHTTP2Port) + c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) c.ReadinessProbe.ProbeHandler.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort c.Env = env(map[string]string{ "USER_PORT": "1955", @@ -269,7 +269,7 @@ func TestMakeQueueContainer(t *testing.T) { c.Env = env(map[string]string{ "ENABLE_PROFILING": "true", }) - c.Ports = append(queueNonServingPorts, profilingPort, queueHTTPPort) + c.Ports = 
append(queueNonServingPorts, profilingPort, queueHTTPPort, queueHTTPSPort) }), }, { name: "custom TimeoutSeconds", @@ -885,6 +885,7 @@ var defaultEnv = map[string]string{ "METRICS_DOMAIN": metrics.Domain(), "METRICS_COLLECTOR_ADDRESS": "", "QUEUE_SERVING_PORT": "8012", + "QUEUE_SERVING_TLS_PORT": "8112", "REVISION_TIMEOUT_SECONDS": "45", "SERVING_CONFIGURATION": "", "SERVING_ENABLE_PROBE_REQUEST_LOG": "false", diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index b84631e4bbab..adc1bb337bef 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -30,6 +30,7 @@ import ( caching "knative.dev/caching/pkg/apis/caching/v1alpha1" cachingclient "knative.dev/caching/pkg/client/injection/client" + network "knative.dev/networking/pkg" "knative.dev/networking/pkg/apis/networking" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/configmap" @@ -891,5 +892,6 @@ func reconcilerTestConfig() *config.Config { }, Logging: &logging.Config{}, Tracing: &tracingconfig.Config{}, + Network: &network.Config{}, } } diff --git a/test/config/tls/config-network.yaml b/test/config/tls/config-network.yaml index 6a3b909b8a48..ae048d23acbc 100644 --- a/test/config/tls/config-network.yaml +++ b/test/config/tls/config-network.yaml @@ -24,3 +24,6 @@ data: activator-ca: "serving-ca" activator-san: "knative" activator-cert-secret: "server-certs" + queue-proxy-ca: "serving-ca" + queue-proxy-san: "knative" + queue-proxy-cert-secret: "server-certs" diff --git a/test/e2e-common.sh b/test/e2e-common.sh index aabb0adff055..a261e9c65a01 100644 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -366,7 +366,6 @@ function install() { kubectl apply -n ${SYSTEM_NAMESPACE} -f ${REPO_ROOT_DIR}/test/config/tls/config-network.yaml kubectl delete pod -n ${SYSTEM_NAMESPACE} -l app=activator fi - } # Check if we should use --resolvabledomain. 
In case the ingress only has diff --git a/test/generate-cert.sh b/test/generate-cert.sh index 00d9ce5d7b51..5569f166ad32 100755 --- a/test/generate-cert.sh +++ b/test/generate-cert.sh @@ -16,6 +16,7 @@ SYSTEM_NAMESPACE="${SYSTEM_NAMESPACE:-knative-serving}" TEST_NAMESPACE=serving-tests +TEST_NAMESPACE_ALT=serving-tests-alt out_dir="$(mktemp -d /tmp/certs-XXX)" san="knative" @@ -35,3 +36,12 @@ kubectl create -n ${SYSTEM_NAMESPACE} secret generic serving-ca \ kubectl create -n ${SYSTEM_NAMESPACE} secret tls server-certs \ --key="${out_dir}"/tls.key \ --cert="${out_dir}"/tls.crt --dry-run=client -o yaml | kubectl apply -f - + +# Create secrets for test namespaces +kubectl create -n ${TEST_NAMESPACE} secret tls server-certs \ + --key="${out_dir}"/tls.key \ + --cert="${out_dir}"/tls.crt --dry-run=client -o yaml | kubectl apply -f - + +kubectl create -n ${TEST_NAMESPACE_ALT} secret tls server-certs \ + --key="${out_dir}"/tls.key \ + --cert="${out_dir}"/tls.crt --dry-run=client -o yaml | kubectl apply -f -