From 51fb4f4a7d8bad41696b3368651f900810b358c7 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Wed, 30 Mar 2022 11:43:24 +0900 Subject: [PATCH 1/5] All Hops Encrypted: TLS between activator and queue-Proxy Fix https://github.com/knative/serving/issues/12502 Fix https://github.com/knative/serving/issues/12503 --- cmd/activator/main.go | 32 +++++- cmd/queue/main.go | 62 ++++++++++-- pkg/activator/handler/handler.go | 24 ++++- pkg/activator/handler/handler_test.go | 10 +- pkg/activator/handler/main_test.go | 2 +- pkg/http/proxy.go | 4 +- pkg/http/proxy_test.go | 97 ++++++++++++++++++- pkg/queue/constants.go | 3 + pkg/reconciler/revision/resources/deploy.go | 22 +++++ .../revision/resources/deploy_test.go | 5 +- pkg/reconciler/revision/resources/queue.go | 16 ++- .../revision/resources/queue_test.go | 7 +- pkg/reconciler/revision/table_test.go | 2 + test/config/tls/config-network.yaml | 3 + test/e2e-common.sh | 1 - test/generate-cert.sh | 10 ++ 16 files changed, 272 insertions(+), 28 deletions(-) diff --git a/cmd/activator/main.go b/cmd/activator/main.go index 3d2633023d38..1ed857392229 100644 --- a/cmd/activator/main.go +++ b/cmd/activator/main.go @@ -19,6 +19,7 @@ package main import ( "context" "crypto/tls" + "crypto/x509" "errors" "fmt" "log" @@ -153,6 +154,32 @@ func main() { logger.Fatalw("Failed to construct network config", zap.Error(err)) } + // Enable TLS client when queue-proxy-ca is specified. + // At this moment activator with TLS does not disable HTTP. + // See also https://github.com/knative/serving/issues/12808. 
+ if networkConfig.QueueProxyCA != "" && networkConfig.QueueProxySAN != "" { + caSecret, err := kubeClient.CoreV1().Secrets(system.Namespace()).Get(ctx, networkConfig.QueueProxyCA, metav1.GetOptions{}) + if err != nil { + logger.Fatalw("failed to get secret", zap.Error(err)) + } + pool, err := x509.SystemCertPool() + if err != nil { + pool = x509.NewCertPool() + } + + if ok := pool.AppendCertsFromPEM(caSecret.Data["ca.crt"]); !ok { + logger.Fatalw("failed to append ca cert to the RootCAs") + } + + tlsConf := &tls.Config{ + RootCAs: pool, + InsecureSkipVerify: false, + ServerName: networkConfig.QueueProxySAN, + MinVersion: tls.VersionTLS12, + } + transport = pkgnet.NewProxyAutoTLSTransport(env.MaxIdleProxyConns, env.MaxIdleProxyConnsPerHost, tlsConf) + } + // Start throttler. throttler := activatornet.NewThrottler(ctx, env.PodIP) go throttler.Run(ctx, transport, networkConfig.EnableMeshPodAddressability, networkConfig.MeshCompatibilityMode) @@ -186,9 +213,12 @@ func main() { concurrencyReporter := activatorhandler.NewConcurrencyReporter(ctx, env.PodName, statCh) go concurrencyReporter.Run(ctx.Done()) + // Enable TLS against queue-proxy when the CA and SA are specified. + tlsEnabled := networkConfig.QueueProxyCA != "" && networkConfig.QueueProxySAN != "" + // Create activation handler chain // Note: innermost handlers are specified first, ie. 
the last handler in the chain will be executed first - ah := activatorhandler.New(ctx, throttler, transport, networkConfig.EnableMeshPodAddressability, logger) + ah := activatorhandler.New(ctx, throttler, transport, networkConfig.EnableMeshPodAddressability, logger, tlsEnabled) ah = concurrencyReporter.Handler(ah) ah = activatorhandler.NewTracingHandler(ah) reqLogHandler, err := pkghttp.NewRequestLogHandler(ah, logging.NewSyncFileWriter(os.Stdout), "", diff --git a/cmd/queue/main.go b/cmd/queue/main.go index 40a517d762be..52d31944d6de 100644 --- a/cmd/queue/main.go +++ b/cmd/queue/main.go @@ -62,11 +62,18 @@ const ( // This is to give networking a little bit more time to remove the pod // from its configuration and propagate that to all loadbalancers and nodes. drainSleepDuration = 30 * time.Second + + // certPath is the path for the server certificate mounted by queue-proxy. + certPath = queue.CertDirectory + "/tls.crt" + + // keyPath is the path for the server certificate key mounted by queue-proxy. + keyPath = queue.CertDirectory + "/tls.key" ) type config struct { ContainerConcurrency int `split_words:"true" required:"true"` QueueServingPort string `split_words:"true" required:"true"` + QueueServingTLSPort string `split_words:"true" required:"true"` UserPort string `split_words:"true" required:"true"` RevisionTimeoutSeconds int `split_words:"true" required:"true"` MaxDurationSeconds int `split_words:"true"` // optional @@ -162,15 +169,22 @@ func main() { if env.ConcurrencyStateEndpoint != "" { concurrencyendpoint = queue.NewConcurrencyEndpoint(env.ConcurrencyStateEndpoint, env.ConcurrencyStateTokenPath) } - mainServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint) + + // Enable TLS when certificate is mounted. 
+ tlsEnabled := exists(logger, certPath) && exists(logger, keyPath) + + mainServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint, false) servers := map[string]*http.Server{ "main": mainServer, - "admin": buildAdminServer(logger, drain), "metrics": buildMetricsServer(promStatReporter, protoStatReporter), } if env.EnableProfiling { servers["profile"] = profiling.NewServer(profiling.NewHandler(logger, true)) } + // Create the admin server for non-TLS when TLS is not enabled otherwise the port is conflicted. + if !tlsEnabled { + servers["admin"] = buildAdminServer(logger, drain) + } errCh := make(chan error) for name, server := range servers { @@ -182,6 +196,25 @@ func main() { }(name, server) } + // Enable TLS server when activator server certs are mounted. + // At this moment activator with TLS does not disable HTTP. + // See also https://github.com/knative/serving/issues/12808. + if tlsEnabled { + mainTLSServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint, true /* enable TLS */) + tlsServers := map[string]*http.Server{ + "tlsMain": mainTLSServer, + "tlsAdmin": buildAdminServer(logger, drain), + } + for name, server := range tlsServers { + go func(name string, s *http.Server) { + // Don't forward ErrServerClosed as that indicates we're already shutting down. + if err := s.ListenAndServeTLS(certPath, keyPath); err != nil && !errors.Is(err, http.ErrServerClosed) { + errCh <- fmt.Errorf("%s server failed to serve: %w", name, err) + } + }(name, server) + } + } + // Blocks until we actually receive a TERM signal or one of the servers // exits unexpectedly. We fold both signals together because we only want // to act on the first of those to reach here. 
@@ -212,6 +245,14 @@ func main() { } } +func exists(logger *zap.SugaredLogger, filename string) bool { + _, err := os.Stat(filename) + if err != nil && !os.IsNotExist(err) { + logger.Fatalw(fmt.Sprintf("Failed to verify the file path %q", filename), zap.Error(err)) + } + return err == nil +} + func buildProbe(logger *zap.SugaredLogger, encodedProbe string, autodetectHTTP2 bool) *readiness.Probe { coreProbe, err := readiness.DecodeProbe(encodedProbe) if err != nil { @@ -224,18 +265,18 @@ func buildProbe(logger *zap.SugaredLogger, encodedProbe string, autodetectHTTP2 } func buildServer(ctx context.Context, env config, probeContainer func() bool, stats *network.RequestStats, logger *zap.SugaredLogger, - ce *queue.ConcurrencyEndpoint) (server *http.Server, drain func()) { + ce *queue.ConcurrencyEndpoint, enableTLS bool) (server *http.Server, drain func()) { target := net.JoinHostPort("127.0.0.1", env.UserPort) - httpProxy := pkghttp.NewHeaderPruningReverseProxy(target, pkghttp.NoHostOverride, activator.RevisionHeaders) + httpProxy := pkghttp.NewHeaderPruningReverseProxy(target, pkghttp.NoHostOverride, activator.RevisionHeaders, "http" /* use http to the target*/) httpProxy.Transport = buildTransport(env, logger) httpProxy.ErrorHandler = pkghandler.Error(logger) httpProxy.BufferPool = network.NewBufferPool() httpProxy.FlushInterval = network.FlushInterval breaker := buildBreaker(logger, env) - metricsSupported := supportsMetrics(ctx, logger, env) + metricsSupported := supportsMetrics(ctx, logger, env, enableTLS) tracingEnabled := env.TracingConfigBackend != tracingconfig.None concurrencyStateEnabled := env.ConcurrencyStateEndpoint != "" firstByteTimeout := time.Duration(env.RevisionTimeoutSeconds) * time.Second @@ -287,6 +328,10 @@ func buildServer(ctx context.Context, env config, probeContainer func() bool, st composedHandler = requestLogHandler(logger, composedHandler, env) } + if enableTLS { + return pkgnet.NewServer(":"+env.QueueServingTLSPort, composedHandler), 
drainer.Drain + } + return pkgnet.NewServer(":"+env.QueueServingPort, composedHandler), drainer.Drain } @@ -333,12 +378,15 @@ func buildBreaker(logger *zap.SugaredLogger, env config) *queue.Breaker { return queue.NewBreaker(params) } -func supportsMetrics(ctx context.Context, logger *zap.SugaredLogger, env config) bool { +func supportsMetrics(ctx context.Context, logger *zap.SugaredLogger, env config, enableTLS bool) bool { + // Metrics needs to be registered on either TLS server or non-TLS server. Give it away to the TLS server. + if enableTLS { + return false + } // Setup request metrics reporting for end-user metrics. if env.ServingRequestMetricsBackend == "" { return false } - if err := setupMetricsExporter(ctx, logger, env.ServingRequestMetricsBackend, env.MetricsCollectorAddress); err != nil { logger.Errorw("Error setting up request metrics exporter. Request metrics will be unavailable.", zap.Error(err)) return false diff --git a/pkg/activator/handler/handler.go b/pkg/activator/handler/handler.go index 85e72261e178..0ae8f57666f1 100644 --- a/pkg/activator/handler/handler.go +++ b/pkg/activator/handler/handler.go @@ -21,6 +21,8 @@ import ( "errors" "net/http" "net/http/httputil" + "strconv" + "strings" "go.opencensus.io/plugin/ochttp" "go.opencensus.io/trace" @@ -35,6 +37,7 @@ import ( "knative.dev/serving/pkg/activator" activatorconfig "knative.dev/serving/pkg/activator/config" pkghttp "knative.dev/serving/pkg/http" + "knative.dev/serving/pkg/networking" "knative.dev/serving/pkg/queue" "knative.dev/serving/pkg/reconciler/serverlessservice/resources/names" ) @@ -53,10 +56,11 @@ type activationHandler struct { throttler Throttler bufferPool httputil.BufferPool logger *zap.SugaredLogger + tls bool } // New constructs a new http.Handler that deals with revision activation. 
-func New(_ context.Context, t Throttler, transport http.RoundTripper, usePassthroughLb bool, logger *zap.SugaredLogger) http.Handler { +func New(_ context.Context, t Throttler, transport http.RoundTripper, usePassthroughLb bool, logger *zap.SugaredLogger, tlsEnabled bool) http.Handler { return &activationHandler{ transport: transport, tracingTransport: &ochttp.Transport{ @@ -67,6 +71,7 @@ func New(_ context.Context, t Throttler, transport http.RoundTripper, usePassthr throttler: t, bufferPool: network.NewBufferPool(), logger: logger, + tls: tlsEnabled, } } @@ -116,7 +121,14 @@ func (a *activationHandler) proxyRequest(revID types.NamespacedName, w http.Resp if usePassthroughLb { hostOverride = names.PrivateService(revID.Name) + "." + revID.Namespace } - proxy := pkghttp.NewHeaderPruningReverseProxy(target, hostOverride, activator.RevisionHeaders) + + var proxy *httputil.ReverseProxy + if a.tls { + proxy = pkghttp.NewHeaderPruningReverseProxy(useSecurePort(target), hostOverride, activator.RevisionHeaders, "https" /* use https to the target */) + } else { + proxy = pkghttp.NewHeaderPruningReverseProxy(target, hostOverride, activator.RevisionHeaders, "http" /* use https to the target */) + } + proxy.BufferPool = a.bufferPool proxy.Transport = a.transport if tracingEnabled { @@ -129,3 +141,11 @@ func (a *activationHandler) proxyRequest(revID types.NamespacedName, w http.Resp proxy.ServeHTTP(w, r) } + +// useSecurePort replaces the default port with HTTPS port (8112). +// TODO: endpointsToDests() should support HTTPS instead of this overwrite but it needs metadata request to be encrypted. +// This code should be removed when https://github.com/knative/serving/issues/12821 was solved. 
+func useSecurePort(target string) string { + target = strings.Split(target, ":")[0] + return target + ":" + strconv.Itoa(networking.BackendHTTPSPort) +} diff --git a/pkg/activator/handler/handler_test.go b/pkg/activator/handler/handler_test.go index 33ef6a571d6a..2481675391d6 100644 --- a/pkg/activator/handler/handler_test.go +++ b/pkg/activator/handler/handler_test.go @@ -123,7 +123,7 @@ func TestActivationHandler(t *testing.T) { ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) defer cancel() - handler := New(ctx, test.throttler, rt, false /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, test.throttler, rt, false /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) resp := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) @@ -162,7 +162,7 @@ func TestActivationHandlerProxyHeader(t *testing.T) { ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) defer cancel() - handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) writer := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) @@ -195,7 +195,7 @@ func TestActivationHandlerPassthroughLb(t *testing.T) { ctx, cancel, _ := rtesting.SetupFakeContextWithCancel(t) defer cancel() - handler := New(ctx, fakeThrottler{}, rt, true /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, fakeThrottler{}, rt, true /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) writer := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, "http://example.com", nil) @@ -276,7 +276,7 @@ func TestActivationHandlerTraceSpans(t *testing.T) { oct.Finish() }() - handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, 
logging.FromContext(ctx), false /* TLS */) // Set up config store to populate context. configStore := setupConfigStore(t, logging.FromContext(ctx)) @@ -345,7 +345,7 @@ func BenchmarkHandler(b *testing.B) { }, nil }) - handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx)) + handler := New(ctx, fakeThrottler{}, rt, false /*usePassthroughLb*/, logging.FromContext(ctx), false /* TLS */) request := func() *http.Request { req := httptest.NewRequest(http.MethodGet, "http://example.com", nil) diff --git a/pkg/activator/handler/main_test.go b/pkg/activator/handler/main_test.go index f8fe4edfea75..98b44b244413 100644 --- a/pkg/activator/handler/main_test.go +++ b/pkg/activator/handler/main_test.go @@ -69,7 +69,7 @@ func BenchmarkHandlerChain(b *testing.B) { }) // Make sure to update this if the activator's main file changes. - ah := New(ctx, fakeThrottler{}, rt, false, logger) + ah := New(ctx, fakeThrottler{}, rt, false, logger, false /* TLS */) ah = concurrencyReporter.Handler(ah) ah = NewTracingHandler(ah) ah, _ = pkghttp.NewRequestLogHandler(ah, io.Discard, "", nil, false) diff --git a/pkg/http/proxy.go b/pkg/http/proxy.go index 81805cedbde5..6054a20b2b83 100644 --- a/pkg/http/proxy.go +++ b/pkg/http/proxy.go @@ -33,10 +33,10 @@ const NoHostOverride = "" // If hostOverride is not an empty string, the outgoing request's Host header will be // replaced with that explicit value and the passthrough loadbalancing header will be // set to enable pod-addressability. 
-func NewHeaderPruningReverseProxy(target, hostOverride string, headersToRemove []string) *httputil.ReverseProxy { +func NewHeaderPruningReverseProxy(target, hostOverride string, headersToRemove []string, scheme string) *httputil.ReverseProxy { return &httputil.ReverseProxy{ Director: func(req *http.Request) { - req.URL.Scheme = "http" + req.URL.Scheme = scheme req.URL.Host = target if hostOverride != NoHostOverride { diff --git a/pkg/http/proxy_test.go b/pkg/http/proxy_test.go index a5e40bd3afac..5fb9a7ed3f2d 100644 --- a/pkg/http/proxy_test.go +++ b/pkg/http/proxy_test.go @@ -17,6 +17,8 @@ limitations under the License. package http import ( + "crypto/tls" + "crypto/x509" "encoding/json" "net/http" "net/http/httptest" @@ -89,7 +91,7 @@ func TestNewHeaderPruningProxy(t *testing.T) { proxy := NewHeaderPruningReverseProxy(serverURL.Host, test.host, []string{ "header-to-remove-1", "header-to-remove-2", - }) + }, "http") resp := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, test.url, nil) @@ -113,3 +115,96 @@ func TestNewHeaderPruningProxy(t *testing.T) { }) } } + +func TestNewHeaderPruningProxyHTTPS(t *testing.T) { + var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { + r.Header.Add("Host", r.Host) // Explicitly add the host header so we can assert. 
+ if err := json.NewEncoder(w).Encode(r.Header); err != nil { + panic(err) + } + } + + server := httptest.NewTLSServer(handler) + serverURL, _ := url.Parse(server.URL) + defer server.Close() + + rootCAs := x509.NewCertPool() + rootCAs.AddCert(server.Certificate()) + tlsConf := &tls.Config{ + MinVersion: tls.VersionTLS12, + RootCAs: rootCAs, + } + + tests := []struct { + name string + url string + host string + header http.Header + expectHeaders http.Header + }{{ + name: "prunes activator headers, does not add user agent header", + url: "https://example.com/", + header: http.Header{ + "Header-Not-To-Remove": []string{"value"}, + "Header-To-Remove-1": []string{"some-value"}, + "Header-To-Remove-2": []string{"some-value"}, + }, + expectHeaders: http.Header{ + "Host": []string{"example.com"}, + "Header-Not-To-Remove": []string{"value"}, + }, + }, { + name: "explicit user agent header not removed", + url: "https://example.com/", + header: http.Header{ + network.UserAgentKey: []string{"gold"}, + }, + expectHeaders: http.Header{ + "Host": []string{"example.com"}, + network.UserAgentKey: []string{"gold"}, + }, + }, { + name: "overrides host header", + url: "https://example.com/", + host: "foo.bar", + header: http.Header{ + network.UserAgentKey: []string{"gold"}, + }, + expectHeaders: http.Header{ + "Host": []string{"foo.bar"}, + networking.PassthroughLoadbalancingHeaderName: []string{"true"}, + network.UserAgentKey: []string{"gold"}, + }, + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + proxy := NewHeaderPruningReverseProxy(serverURL.Host, test.host, []string{ + "header-to-remove-1", + "header-to-remove-2", + }, "https") + + resp := httptest.NewRecorder() + + req := httptest.NewRequest(http.MethodPost, test.url, nil) + req.Header = test.header + + proxy.Transport = &http.Transport{TLSClientConfig: tlsConf} + proxy.ServeHTTP(resp, req) + + var proxiedHeaders http.Header + if err := json.NewDecoder(resp.Body).Decode(&proxiedHeaders); err != nil 
{ + t.Fatalf("Decode = %v", err) + } + + // Remove headers golang adds from consideration. + for _, k := range []string{"Accept-Encoding", "Content-Length", "X-Forwarded-For"} { + proxiedHeaders.Del(k) + } + + if got, want := proxiedHeaders, test.expectHeaders; !cmp.Equal(want, got) { + t.Errorf("Got Headers=%v, want: %v; diff: %s", got, want, cmp.Diff(want, got)) + } + }) + } +} diff --git a/pkg/queue/constants.go b/pkg/queue/constants.go index 4d6b6f0573b0..4d08db5c1a86 100644 --- a/pkg/queue/constants.go +++ b/pkg/queue/constants.go @@ -26,4 +26,7 @@ const ( // Main usage is to delay the termination of user-container until all // accepted requests have been processed. RequestQueueDrainPath = "/wait-for-drain" + + // CertDirectory is the name of the directory path where certificates are stored. + CertDirectory = "/var/lib/knative/certs" ) diff --git a/pkg/reconciler/revision/resources/deploy.go b/pkg/reconciler/revision/resources/deploy.go index 997fc9715ad4..4a58e1b527b3 100644 --- a/pkg/reconciler/revision/resources/deploy.go +++ b/pkg/reconciler/revision/resources/deploy.go @@ -70,6 +70,12 @@ var ( }, } + certVolumeMount = corev1.VolumeMount{ + MountPath: queue.CertDirectory, + Name: "server-certs", + ReadOnly: true, + } + varTokenVolumeMount = corev1.VolumeMount{ Name: varTokenVolume.Name, MountPath: concurrencyStateTokenVolumeMountPath, @@ -89,6 +95,17 @@ var ( } ) +func certVolume(secret string) corev1.Volume { + return corev1.Volume{ + Name: "server-certs", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret, + }, + }, + } +} + func rewriteUserProbe(p *corev1.Probe, userPort int) { if p == nil { return @@ -122,6 +139,11 @@ func makePodSpec(rev *v1.Revision, cfg *config.Config) (*corev1.PodSpec, error) extraVolumes = append(extraVolumes, varTokenVolume) } + if cfg.Network.QueueProxyCertSecret != "" { + queueContainer.VolumeMounts = append(queueContainer.VolumeMounts, certVolumeMount) + extraVolumes = 
append(extraVolumes, certVolume(cfg.Network.QueueProxyCertSecret)) + } + podSpec := BuildPodSpec(rev, append(BuildUserContainers(rev), *queueContainer), cfg) podSpec.Volumes = append(podSpec.Volumes, extraVolumes...) diff --git a/pkg/reconciler/revision/resources/deploy_test.go b/pkg/reconciler/revision/resources/deploy_test.go index 67ed5fe3d62d..f0a8a8dedd68 100644 --- a/pkg/reconciler/revision/resources/deploy_test.go +++ b/pkg/reconciler/revision/resources/deploy_test.go @@ -75,7 +75,7 @@ var ( defaultQueueContainer = &corev1.Container{ Name: QueueContainerName, Resources: createQueueResources(&deploymentConfig, make(map[string]string), &corev1.Container{}), - Ports: append(queueNonServingPorts, queueHTTPPort), + Ports: append(queueNonServingPorts, queueHTTPPort, queueHTTPSPort), ReadinessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ @@ -103,6 +103,9 @@ var ( }, { Name: "QUEUE_SERVING_PORT", Value: "8012", + }, { + Name: "QUEUE_SERVING_TLS_PORT", + Value: "8112", }, { Name: "CONTAINER_CONCURRENCY", Value: "0", diff --git a/pkg/reconciler/revision/resources/queue.go b/pkg/reconciler/revision/resources/queue.go index d5fe68df1918..675ed65597f1 100644 --- a/pkg/reconciler/revision/resources/queue.go +++ b/pkg/reconciler/revision/resources/queue.go @@ -44,9 +44,10 @@ import ( ) const ( - localAddress = "127.0.0.1" - requestQueueHTTPPortName = "queue-port" - profilingPortName = "profiling-port" + localAddress = "127.0.0.1" + requestQueueHTTPPortName = "queue-port" + requestQueueHTTPSPortName = "https-port" // must be no more than 15 characters. + profilingPortName = "profiling-port" ) var ( @@ -58,6 +59,10 @@ var ( Name: requestQueueHTTPPortName, ContainerPort: networking.BackendHTTP2Port, } + queueHTTPSPort = corev1.ContainerPort{ + Name: requestQueueHTTPSPortName, + ContainerPort: networking.BackendHTTPSPort, + } queueNonServingPorts = []corev1.ContainerPort{{ // Provides health checks and lifecycle hooks. 
Name: v1.QueueAdminPortName, @@ -202,7 +207,7 @@ func makeQueueContainer(rev *v1.Revision, cfg *config.Config) (*corev1.Container if rev.GetProtocol() == pkgnet.ProtocolH2C { servingPort = queueHTTP2Port } - ports = append(ports, servingPort) + ports = append(ports, servingPort, queueHTTPSPort) container := rev.Spec.GetContainer() @@ -269,6 +274,9 @@ func makeQueueContainer(rev *v1.Revision, cfg *config.Config) (*corev1.Container }, { Name: "QUEUE_SERVING_PORT", Value: strconv.Itoa(int(servingPort.ContainerPort)), + }, { + Name: "QUEUE_SERVING_TLS_PORT", + Value: strconv.Itoa(int(queueHTTPSPort.ContainerPort)), }, { Name: "CONTAINER_CONCURRENCY", Value: strconv.Itoa(int(rev.Spec.GetContainerConcurrency())), diff --git a/pkg/reconciler/revision/resources/queue_test.go b/pkg/reconciler/revision/resources/queue_test.go index 7066f0b41c2a..9e82f6eaf708 100644 --- a/pkg/reconciler/revision/resources/queue_test.go +++ b/pkg/reconciler/revision/resources/queue_test.go @@ -124,7 +124,7 @@ func TestMakeQueueContainer(t *testing.T) { }, want: queueContainer(func(c *corev1.Container) { c.Image = "alpine" - c.Ports = append(queueNonServingPorts, queueHTTP2Port) + c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) c.ReadinessProbe.ProbeHandler.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort c.Env = env(map[string]string{ "USER_PORT": "1955", @@ -147,7 +147,7 @@ func TestMakeQueueContainer(t *testing.T) { }, want: queueContainer(func(c *corev1.Container) { c.Image = "alpine" - c.Ports = append(queueNonServingPorts, queueHTTP2Port) + c.Ports = append(queueNonServingPorts, queueHTTP2Port, queueHTTPSPort) c.ReadinessProbe.ProbeHandler.HTTPGet.Port.IntVal = queueHTTP2Port.ContainerPort c.Env = env(map[string]string{ "USER_PORT": "1955", @@ -269,7 +269,7 @@ func TestMakeQueueContainer(t *testing.T) { c.Env = env(map[string]string{ "ENABLE_PROFILING": "true", }) - c.Ports = append(queueNonServingPorts, profilingPort, queueHTTPPort) + c.Ports = 
append(queueNonServingPorts, profilingPort, queueHTTPPort, queueHTTPSPort) }), }, { name: "custom TimeoutSeconds", @@ -885,6 +885,7 @@ var defaultEnv = map[string]string{ "METRICS_DOMAIN": metrics.Domain(), "METRICS_COLLECTOR_ADDRESS": "", "QUEUE_SERVING_PORT": "8012", + "QUEUE_SERVING_TLS_PORT": "8112", "REVISION_TIMEOUT_SECONDS": "45", "SERVING_CONFIGURATION": "", "SERVING_ENABLE_PROBE_REQUEST_LOG": "false", diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go index b84631e4bbab..adc1bb337bef 100644 --- a/pkg/reconciler/revision/table_test.go +++ b/pkg/reconciler/revision/table_test.go @@ -30,6 +30,7 @@ import ( caching "knative.dev/caching/pkg/apis/caching/v1alpha1" cachingclient "knative.dev/caching/pkg/client/injection/client" + network "knative.dev/networking/pkg" "knative.dev/networking/pkg/apis/networking" kubeclient "knative.dev/pkg/client/injection/kube/client" "knative.dev/pkg/configmap" @@ -891,5 +892,6 @@ func reconcilerTestConfig() *config.Config { }, Logging: &logging.Config{}, Tracing: &tracingconfig.Config{}, + Network: &network.Config{}, } } diff --git a/test/config/tls/config-network.yaml b/test/config/tls/config-network.yaml index 6a3b909b8a48..ae048d23acbc 100644 --- a/test/config/tls/config-network.yaml +++ b/test/config/tls/config-network.yaml @@ -24,3 +24,6 @@ data: activator-ca: "serving-ca" activator-san: "knative" activator-cert-secret: "server-certs" + queue-proxy-ca: "serving-ca" + queue-proxy-san: "knative" + queue-proxy-cert-secret: "server-certs" diff --git a/test/e2e-common.sh b/test/e2e-common.sh index aabb0adff055..a261e9c65a01 100644 --- a/test/e2e-common.sh +++ b/test/e2e-common.sh @@ -366,7 +366,6 @@ function install() { kubectl apply -n ${SYSTEM_NAMESPACE} -f ${REPO_ROOT_DIR}/test/config/tls/config-network.yaml kubectl delete pod -n ${SYSTEM_NAMESPACE} -l app=activator fi - } # Check if we should use --resolvabledomain. 
In case the ingress only has diff --git a/test/generate-cert.sh b/test/generate-cert.sh index 00d9ce5d7b51..5569f166ad32 100755 --- a/test/generate-cert.sh +++ b/test/generate-cert.sh @@ -16,6 +16,7 @@ SYSTEM_NAMESPACE="${SYSTEM_NAMESPACE:-knative-serving}" TEST_NAMESPACE=serving-tests +TEST_NAMESPACE_ALT=serving-tests-alt out_dir="$(mktemp -d /tmp/certs-XXX)" san="knative" @@ -35,3 +36,12 @@ kubectl create -n ${SYSTEM_NAMESPACE} secret generic serving-ca \ kubectl create -n ${SYSTEM_NAMESPACE} secret tls server-certs \ --key="${out_dir}"/tls.key \ --cert="${out_dir}"/tls.crt --dry-run=client -o yaml | kubectl apply -f - + +# Create secrets for test namespaces +kubectl create -n ${TEST_NAMESPACE} secret tls server-certs \ + --key="${out_dir}"/tls.key \ + --cert="${out_dir}"/tls.crt --dry-run=client -o yaml | kubectl apply -f - + +kubectl create -n ${TEST_NAMESPACE_ALT} secret tls server-certs \ + --key="${out_dir}"/tls.key \ + --cert="${out_dir}"/tls.crt --dry-run=client -o yaml | kubectl apply -f - From 10c75f7ddd37d83033e69f4d92b6035cc1fd0fa6 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Fri, 15 Apr 2022 11:44:15 +0900 Subject: [PATCH 2/5] Large capital --- cmd/activator/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/activator/main.go b/cmd/activator/main.go index 1ed857392229..a896685af93b 100644 --- a/cmd/activator/main.go +++ b/cmd/activator/main.go @@ -160,7 +160,7 @@ func main() { if networkConfig.QueueProxyCA != "" && networkConfig.QueueProxySAN != "" { caSecret, err := kubeClient.CoreV1().Secrets(system.Namespace()).Get(ctx, networkConfig.QueueProxyCA, metav1.GetOptions{}) if err != nil { - logger.Fatalw("failed to get secret", zap.Error(err)) + logger.Fatalw("Failed to get secret", zap.Error(err)) } pool, err := x509.SystemCertPool() if err != nil { @@ -168,7 +168,7 @@ func main() { } if ok := pool.AppendCertsFromPEM(caSecret.Data["ca.crt"]); !ok { - logger.Fatalw("failed to append ca cert to the RootCAs") + 
logger.Fatalw("Failed to append ca cert to the RootCAs") } tlsConf := &tls.Config{ From f37d372a65deafd6da1042b7440420ed3854ae27 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Sun, 17 Apr 2022 23:14:10 +0900 Subject: [PATCH 3/5] Fix review comments --- cmd/activator/main.go | 9 +++++---- cmd/queue/main.go | 7 ++++--- pkg/activator/handler/handler.go | 4 ++-- pkg/http/proxy.go | 8 ++++++-- pkg/http/proxy_test.go | 4 ++-- 5 files changed, 19 insertions(+), 13 deletions(-) diff --git a/cmd/activator/main.go b/cmd/activator/main.go index a896685af93b..b3e561a1de43 100644 --- a/cmd/activator/main.go +++ b/cmd/activator/main.go @@ -154,14 +154,18 @@ func main() { logger.Fatalw("Failed to construct network config", zap.Error(err)) } + // Enable TLS against queue-proxy when the CA and SA are specified. + tlsEnabled := networkConfig.QueueProxyCA != "" && networkConfig.QueueProxySAN != "" + // Enable TLS client when queue-proxy-ca is specified. // At this moment activator with TLS does not disable HTTP. // See also https://github.com/knative/serving/issues/12808. - if networkConfig.QueueProxyCA != "" && networkConfig.QueueProxySAN != "" { + if tlsEnabled { caSecret, err := kubeClient.CoreV1().Secrets(system.Namespace()).Get(ctx, networkConfig.QueueProxyCA, metav1.GetOptions{}) if err != nil { logger.Fatalw("Failed to get secret", zap.Error(err)) } + pool, err := x509.SystemCertPool() if err != nil { pool = x509.NewCertPool() @@ -213,9 +217,6 @@ func main() { concurrencyReporter := activatorhandler.NewConcurrencyReporter(ctx, env.PodName, statCh) go concurrencyReporter.Run(ctx.Done()) - // Enable TLS against queue-proxy when the CA and SA are specified. - tlsEnabled := networkConfig.QueueProxyCA != "" && networkConfig.QueueProxySAN != "" - // Create activation handler chain // Note: innermost handlers are specified first, ie. 
the last handler in the chain will be executed first ah := activatorhandler.New(ctx, throttler, transport, networkConfig.EnableMeshPodAddressability, logger, tlsEnabled) diff --git a/cmd/queue/main.go b/cmd/queue/main.go index 52d31944d6de..f99f8567873e 100644 --- a/cmd/queue/main.go +++ b/cmd/queue/main.go @@ -181,7 +181,7 @@ func main() { if env.EnableProfiling { servers["profile"] = profiling.NewServer(profiling.NewHandler(logger, true)) } - // Create the admin server for non-TLS when TLS is not enabled otherwise the port is conflicted. + // Use TLS for the admin port as well when TLS is available. if !tlsEnabled { servers["admin"] = buildAdminServer(logger, drain) } @@ -266,10 +266,11 @@ func buildProbe(logger *zap.SugaredLogger, encodedProbe string, autodetectHTTP2 func buildServer(ctx context.Context, env config, probeContainer func() bool, stats *network.RequestStats, logger *zap.SugaredLogger, ce *queue.ConcurrencyEndpoint, enableTLS bool) (server *http.Server, drain func()) { + // TODO: If TLS is enabled, execute probes twice and track two different sets of container health. target := net.JoinHostPort("127.0.0.1", env.UserPort) - httpProxy := pkghttp.NewHeaderPruningReverseProxy(target, pkghttp.NoHostOverride, activator.RevisionHeaders, "http" /* use http to the target*/) + httpProxy := pkghttp.NewHeaderPruningReverseProxy(target, pkghttp.NoHostOverride, activator.RevisionHeaders, false /* use HTTP */) httpProxy.Transport = buildTransport(env, logger) httpProxy.ErrorHandler = pkghandler.Error(logger) httpProxy.BufferPool = network.NewBufferPool() @@ -379,7 +380,7 @@ func buildBreaker(logger *zap.SugaredLogger, env config) *queue.Breaker { } func supportsMetrics(ctx context.Context, logger *zap.SugaredLogger, env config, enableTLS bool) bool { - // Metrics needs to be registered on either TLS server or non-TLS server. Give it away to the TLS server. + // Keep it on HTTP because Metrics needs to be registered on either TLS server or non-TLS server. 
if enableTLS { return false } diff --git a/pkg/activator/handler/handler.go b/pkg/activator/handler/handler.go index 0ae8f57666f1..c4183ae504da 100644 --- a/pkg/activator/handler/handler.go +++ b/pkg/activator/handler/handler.go @@ -124,9 +124,9 @@ func (a *activationHandler) proxyRequest(revID types.NamespacedName, w http.Resp var proxy *httputil.ReverseProxy if a.tls { - proxy = pkghttp.NewHeaderPruningReverseProxy(useSecurePort(target), hostOverride, activator.RevisionHeaders, "https" /* use https to the target */) + proxy = pkghttp.NewHeaderPruningReverseProxy(useSecurePort(target), hostOverride, activator.RevisionHeaders, true /* use HTTPS */) } else { - proxy = pkghttp.NewHeaderPruningReverseProxy(target, hostOverride, activator.RevisionHeaders, "http" /* use https to the target */) + proxy = pkghttp.NewHeaderPruningReverseProxy(target, hostOverride, activator.RevisionHeaders, false /* use HTTP */) } proxy.BufferPool = a.bufferPool diff --git a/pkg/http/proxy.go b/pkg/http/proxy.go index 6054a20b2b83..d5c252c37be5 100644 --- a/pkg/http/proxy.go +++ b/pkg/http/proxy.go @@ -33,10 +33,14 @@ const NoHostOverride = "" // If hostOverride is not an empty string, the outgoing request's Host header will be // replaced with that explicit value and the passthrough loadbalancing header will be // set to enable pod-addressability. 
-func NewHeaderPruningReverseProxy(target, hostOverride string, headersToRemove []string, scheme string) *httputil.ReverseProxy { +func NewHeaderPruningReverseProxy(target, hostOverride string, headersToRemove []string, useHTTPS bool) *httputil.ReverseProxy { return &httputil.ReverseProxy{ Director: func(req *http.Request) { - req.URL.Scheme = scheme + if useHTTPS { + req.URL.Scheme = "https" + } else { + req.URL.Scheme = "http" + } req.URL.Host = target if hostOverride != NoHostOverride { diff --git a/pkg/http/proxy_test.go b/pkg/http/proxy_test.go index 5fb9a7ed3f2d..83b74300b37e 100644 --- a/pkg/http/proxy_test.go +++ b/pkg/http/proxy_test.go @@ -91,7 +91,7 @@ func TestNewHeaderPruningProxy(t *testing.T) { proxy := NewHeaderPruningReverseProxy(serverURL.Host, test.host, []string{ "header-to-remove-1", "header-to-remove-2", - }, "http") + }, false /* use HTTPS */) resp := httptest.NewRecorder() req := httptest.NewRequest(http.MethodPost, test.url, nil) @@ -182,7 +182,7 @@ func TestNewHeaderPruningProxyHTTPS(t *testing.T) { proxy := NewHeaderPruningReverseProxy(serverURL.Host, test.host, []string{ "header-to-remove-1", "header-to-remove-2", - }, "https") + }, true /* use HTTPS */) resp := httptest.NewRecorder() From 1bf67e584e9c442649602798841b90fbc1226dd7 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Mon, 18 Apr 2022 00:17:59 +0900 Subject: [PATCH 4/5] Refactor loop --- cmd/queue/main.go | 52 +++++++++++++++++++++++------------------------ 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/cmd/queue/main.go b/cmd/queue/main.go index f99f8567873e..6b2eb3ee1620 100644 --- a/cmd/queue/main.go +++ b/cmd/queue/main.go @@ -174,20 +174,31 @@ func main() { tlsEnabled := exists(logger, certPath) && exists(logger, keyPath) mainServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint, false) - servers := map[string]*http.Server{ + httpServers := map[string]*http.Server{ "main": mainServer, "metrics": 
buildMetricsServer(promStatReporter, protoStatReporter), + "admin": buildAdminServer(logger, drain), } if env.EnableProfiling { - servers["profile"] = profiling.NewServer(profiling.NewHandler(logger, true)) + httpServers["profile"] = profiling.NewServer(profiling.NewHandler(logger, true)) } - // Use TLS for the admin port as well when TLS is available. - if !tlsEnabled { - servers["admin"] = buildAdminServer(logger, drain) + } + + // Enable TLS server when activator server certs are mounted. + // At this moment activator with TLS does not disable HTTP. + // See also https://github.com/knative/serving/issues/12808. + var tlsServers map[string]*http.Server + if tlsEnabled { + mainTLSServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint, true /* enable TLS */) + tlsServers = map[string]*http.Server{ + "tlsMain": mainTLSServer, + "tlsAdmin": buildAdminServer(logger, drain), + } + // Drop admin http server as we use TLS for the admin server. + delete(httpServers, "admin") } errCh := make(chan error) - for name, server := range servers { + for name, server := range httpServers { go func(name string, s *http.Server) { // Don't forward ErrServerClosed as that indicates we're already shutting down. if err := s.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) { @@ -195,24 +206,13 @@ func main() { }(name, server) } - - // Enable TLS server when activator server certs are mounted. - // At this moment activator with TLS does not disable HTTP. - // See also https://github.com/knative/serving/issues/12808. - if tlsEnabled { - mainTLSServer, drain := buildServer(ctx, env, probe, stats, logger, concurrencyendpoint, true /* enable TLS */) - tlsServers := map[string]*http.Server{ - "tlsMain": mainTLSServer, - "tlsAdmin": buildAdminServer(logger, drain), - } - for name, server := range tlsServers { - go func(name string, s *http.Server) { - // Don't forward ErrServerClosed as that indicates we're already shutting down. 
- if err := s.ListenAndServeTLS(certPath, keyPath); err != nil && !errors.Is(err, http.ErrServerClosed) { - errCh <- fmt.Errorf("%s server failed to serve: %w", name, err) - } - }(name, server) - } + for name, server := range tlsServers { + go func(name string, s *http.Server) { + // Don't forward ErrServerClosed as that indicates we're already shutting down. + if err := s.ListenAndServeTLS(certPath, keyPath); err != nil && !errors.Is(err, http.ErrServerClosed) { + errCh <- fmt.Errorf("%s server failed to serve: %w", name, err) + } + }(name, server) } // Blocks until we actually receive a TERM signal or one of the servers @@ -233,9 +233,9 @@ func main() { drain() // Removing the main server from the shutdown logic as we've already shut it down. - delete(servers, "main") + delete(httpServers, "main") - for serverName, srv := range servers { + for serverName, srv := range httpServers { logger.Info("Shutting down server: ", serverName) if err := srv.Shutdown(context.Background()); err != nil { logger.Errorw("Failed to shutdown server", zap.String("server", serverName), zap.Error(err)) From a37798439ac697cc3b1c3dec7ac854c4a004ba35 Mon Sep 17 00:00:00 2001 From: Kenjiro Nakayama Date: Mon, 18 Apr 2022 11:10:25 +0900 Subject: [PATCH 5/5] Add TODO comment --- cmd/activator/.main.go.swp | Bin 0 -> 16384 bytes cmd/queue/main.go | 2 ++ 2 files changed, 2 insertions(+) create mode 100644 cmd/activator/.main.go.swp diff --git a/cmd/activator/.main.go.swp b/cmd/activator/.main.go.swp new file mode 100644 index 0000000000000000000000000000000000000000..d00359f3e4b3202c58aefc28ea84b7b40597f477 GIT binary patch literal 16384 zcmeHNO^h5z6>dlZS%O0#BIE#4r6W7NX577YL}WHLXuZ3(NA~_?XKg3qC#mVKnQ5k{ zYpAZC*}-xEi4zB0+>hkRsp!L?9tJ@pIu6Ab=1FK~V(Zd)3uDv%fJ1 zE>W-a?M!!7y?XW4tE%^2O>^dAeTg0Sk2(0e$#Gtu`C#*<+GXd4Q?AoZd6$oPkBavD zYl%oIzF&=^?7>oC;J0OvsW89`9S?*bi9wJFHHg!85cZ;ghj`7kR6&o&DQdsE@Tx|| zK*hkTFmS-R^VrcDs&u=18@u^!7ha`)Ra`MpF;Fp3F;Fp3F;Fp3F;Fp3G4NVqK=-e8 
zK8^vs-VQ3WpRe2Y`HKC!*S^==%Ku}_@3HTHzODQk`+UNFzRPw{_G4dH9~A=?0~G@m z0~G@m0~G@m0~G@m0~G@m0~G@m1OEpGc;q-YLhg$ch~xghjQ{`r7RUK5@EhO};9g(` z2ml9o1&y8pM!*d4mjjOT3*Z^xXTT4Dr+_DbZvu}1p9Vewi~t3+fCqpJz!I&i8=Y?)c^Fs#=7H;gYk?P#KzSN? z73%Ld?jUhH6_igDj)#>`6A{vG!MGlGJ-W5ye~! zPscssH$*7YsK)M^IlSe$m8YRM&Ag_RiP24}X&UT+E7J)&lFFG10(METO!fq|&RC|o z(s`e?c&3G7y2Dd86fDGt53>gDY+8FN?(Zq#>DcBp>;2&AGxG$ z%*S}V3}d0Euo(Rrp}kOVOrs8wO4)OD#SY%ft_jFkq#sqQuNS zS>c-KOQp&FbVgQnMzWshNi~8o@wQ+vEPTInhGoX!Gan)&(SnV_Hbar1k76>5M^X)w zti^`*FU7huUsNhzo#J7)tz@1?7>!v4m}CaK=VdE0eO;Qql?e;i0!BZ!7oJ8;>JGHX z@}{*@tRhs*wCwv1twKh>Iq67?CIzvS6~09fBvNK5W2~Twoe#^43iwj!ghmn4N$pi5%KxmYXHfiQB|e(TvF! z>k837!V~-oj$!$+gsV2nv1(%_(;l`FGsmkks;^>%8LS8D?Z%K2AI)PWBFh*!LZfBg z!@l-EG}5C!_Bms;)GiM-%@~Y>=UQeVPlQ?}3q8r)+2b?EXWVk+%or|AcG!urL5{0q zXKglQ*o9@$Py8^rgf@uOj4PDAzD#Meguu;T{kaY_fH8QcXLq+>3HH0~AQc)m(gh*V zV$WLGP1;2jwCQNwXM0wqQi$T2U8Q^191kzBb*WwXt3@cbPt&p%=|9;DhYXg4x|)`@ z>SSv>XM2uqQ@Nn1iMCtn5}`xU%*;&fR*0FSGaf}+@eqve)+0%=#ryPQYwXrbHiB*R zb}fn9DNnN9im>Ih;$(A@chEm!EW0%y&)^P5tnmc+?1~W@LzI2Db)Kk;v8C;hvw7?V zSqB?LM4RDms(@3i;86&L(t_X~+zfl_?(A~2N>J9>Ln{0N2tuJF1C#PPvVh}xqr8fUaVcrZ9x2;JrPz9e+hICs;2 zlRFfSeWtAgAR)!s4I-@P5>RFP7EQ51*UKV!aS41!xF^_tRI$anZRw+d@}+7AA2VFJ zZJe2T)5k6qbVZVh5pCaE4uuLf-7PaPavMqaY?OxPQ!7l(fWAs&vS2u{+lvc3p@LSA zICPB*YU+e))YmfDc+bpxslYz!!sf{`aE!Z7p)&1_eH`E96p%4F&f&mG;$95T0jm_} zSQ=q3!8yjx$02rG;X4h1LuvfNoHz8G^+g!B;t+>V+RjO0n_Te}=0J{jFV1M;XAJw3 zbE&?5cICo4yEM19Hn+T9Uu>|IH8#Jpyii}SuPo!~G@Dz#kDaS8FTmjje{`B{z$J$3 zit`QH!{v>9C&D41;$}{>utQfCXKb20 z_4&o+#^RAh~>&8*)7<`)Cq6Et2CE1?(_9X1|fQDY!Xr>XxY58wu^ZRHMxQOb99*XMb4?Jy(fPAcYdKM*YnC;@T2xDcLg_)&xPe>Eep{ zU-m>PN6&2(y;$Gc=Fmh`G>~x2`~~UR95SWILk?HSYruQyPZBB?iv_lPYnb%eyeNRWA@B(+V*+y2dP={4y^ zi9L!n{1rT0zEWh^QZYO$c!D$^<=V^6?cn_RB7lGzW_os+G5E!HZrI_?RDi$4Q-lN& znx{oZc$#g~h!T2s0xhj3&x162C)bf2re+Gs28slb->~n!Qnzi1NJ@m=4{eHc5r(P| zm>^EEV!;6jUr^+L!$zTW3QpT55}+JjjUp9{MIwPAO{ImO4V&smS)l1D-@9d zR5SHS@3Kdue{rvnAYJDn+C)58X2y*o>j(g$3v6tH38cdt)PVXwIG}uJkq|tGD-%qA 
zpSMl0M&Uy$(<5XXBQpmAS?zs0!T3u7bZ>#s0clzqB1J^zK_Vh+)2!7LElQ3w>{`mnsgpzUIj zu2--;$2UflY(2ZazG|t(IBDZB^*mTOrJ>Ct!*{ar6G!l6zC&wlf>*j_*t*_n#qO~< z$Lon!DH0dsxVKpSm=;3=S%%rHycAi}()`S>VfL>nqY6}Fh};X6F~J8xU16tm1BCP| zJ2=Fs3K29Ss9xTN(W3Rwo9yRY8a1fd`uT>rr*RRpOc^6YeZ>&JtJmnCQPMn zO*=K8$V^1$GRy2gFylMS&=A)~Y+E&k2{tAYW2J=1d}PS!-vJBZi8Z=Ov!`8XD)XTl z7CjP2$ju%?7J_yVJYAj_N>jRk#LTh}e8?1y-(3#z{Ys7DuFnRjvy7_P2S`17jQDKJ zVjm%%;+*$YCkc3@j;McX}-Z-!2 z(-tGrh-|Mx8EovwyhD=%tC&kNSu@rg=1NgEdNU!#dN~r&O@TFleFi6HgakKs`n(^R zTRqbXmoeC@i8cr;E*0ts?FFUpCn_+HsIk;dV@(#h8sx*ff=wp5DVv22xFcmZm}rs( jWTM4kYFC4~CBy##ry2)VFh#1