diff --git a/docs/install/install.rst b/docs/install/install.rst
index e764d42a33b6c..ada462336eba6 100644
--- a/docs/install/install.rst
+++ b/docs/install/install.rst
@@ -10,5 +10,5 @@ Building and installation
building
installation
ref_configs
- sandboxes
+ sandboxes/sandboxes.rst
tools
diff --git a/docs/install/sandboxes.rst b/docs/install/sandboxes/front_proxy.rst
similarity index 76%
rename from docs/install/sandboxes.rst
rename to docs/install/sandboxes/front_proxy.rst
index c88bfd80c4587..ff27de78fe70e 100644
--- a/docs/install/sandboxes.rst
+++ b/docs/install/sandboxes/front_proxy.rst
@@ -1,14 +1,7 @@
-.. _install_sandboxes:
-
-Sandboxes
-=========
-
-The docker-compose sandboxes give you different environments to test out Envoy's
-features. As we gauge people's interests we will add more sandboxes demonstrating
-different features.
+.. _install_sandboxes_front_proxy:
Front Proxy
------------
+===========
To get a flavor of what Envoy has to offer as a front proxy, we are releasing a
`docker compose `_ sandbox that deploys a front
@@ -233,88 +226,3 @@ statistics. For example inside ``frontenvoy`` we can get::
Notice that we can get the number of members of upstream clusters, number of requests
fulfilled by them, information about http ingress, and a plethora of other useful
stats.
-
-gRPC bridge
------------
-
-Envoy gRPC
-~~~~~~~~~~
-
-The gRPC bridge sandbox is an example usage of Envoy's
-:ref:`gRPC bridge filter `.
-Included in the sandbox is a gRPC in memory Key/Value store with a Python HTTP
-client. The Python client makes HTTP/1 requests through the Envoy sidecar
-process which are upgraded into HTTP/2 gRPC requests. Response trailers are then
-buffered and sent back to the client as a HTTP/1 header payload.
-
-Another Envoy feature demonstrated in this example is Envoy's ability to do authority
-base routing via its route configuration.
-
-Building the Go service
-~~~~~~~~~~~~~~~~~~~~~~~
-
-To build the Go gRPC service run::
-
- $ pwd
- ~/src/envoy/examples/grpc-bridge
- $ script/bootstrap
- $ script/build
-
-Docker compose
-~~~~~~~~~~~~~~
-
-To run the docker compose file, and set up both the Python and the gRPC containers
-run::
-
- $ pwd
- ~/src/envoy/examples/grpc-bridge
- $ docker-compose up --build
-
-Sending requests to the Key/Value store
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To use the python service and sent gRPC requests::
-
- $ pwd
- ~/src/envoy/examples/grpc-bridge
- # set a key
- $ docker-compose exec python /client/client.py set foo bar
- setf foo to bar
-
- # get a key
- $ docker-compose exec python /client/client.py get foo
- bar
-
-Locally building a docker image with an envoy binary
-----------------------------------------------------
-
-The following steps guide you through building your own envoy binary, and
-putting that in a clean ubuntu container.
-
-**Step 1: Build Envoy**
-
-Using ``lyft/envoy-build`` you will compile envoy.
-This image has all software needed to build envoy. From your envoy directory::
-
- $ pwd
- src/envoy
- $ ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.dev'
-
-That command will take some time to run because it is compiling an envoy binary and running tests.
-
-For more information on building and different build targets, please refer to :repo:`ci/README.md`.
-
-**Step 2: Build image with only envoy binary**
-
-In this step we'll build an image that only has the envoy binary, and none
-of the software used to build it.::
-
- $ pwd
- src/envoy/
- $ docker build -f ci/Dockerfile-envoy-image -t envoy .
-
-Now you can use this ``envoy`` image to build the any of the sandboxes if you change
-the ``FROM`` line in any dockerfile.
-
-This will be particularly useful if you are interested in modifying envoy, and testing
-your changes.
diff --git a/docs/install/sandboxes/grpc_bridge.rst b/docs/install/sandboxes/grpc_bridge.rst
new file mode 100644
index 0000000000000..29b60d4b6bd1a
--- /dev/null
+++ b/docs/install/sandboxes/grpc_bridge.rst
@@ -0,0 +1,52 @@
+.. _install_sandboxes_grpc_bridge:
+
+gRPC Bridge
+===========
+
+Envoy gRPC
+~~~~~~~~~~
+
+The gRPC bridge sandbox is an example usage of Envoy's
+:ref:`gRPC bridge filter `.
+Included in the sandbox is a gRPC in-memory Key/Value store with a Python HTTP
+client. The Python client makes HTTP/1 requests through the Envoy sidecar
+process which are upgraded into HTTP/2 gRPC requests. Response trailers are then
+buffered and sent back to the client as a HTTP/1 header payload.
+
+Another Envoy feature demonstrated in this example is Envoy's ability to do
+authority-based routing via its route configuration.
+
+Building the Go service
+~~~~~~~~~~~~~~~~~~~~~~~
+
+To build the Go gRPC service run::
+
+ $ pwd
+ ~/src/envoy/examples/grpc-bridge
+ $ script/bootstrap
+ $ script/build
+
+Docker compose
+~~~~~~~~~~~~~~
+
+To run the docker compose file and set up both the Python and the gRPC containers,
+run::
+
+ $ pwd
+ ~/src/envoy/examples/grpc-bridge
+ $ docker-compose up --build
+
+Sending requests to the Key/Value store
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To use the Python service and send gRPC requests::
+
+ $ pwd
+ ~/src/envoy/examples/grpc-bridge
+ # set a key
+ $ docker-compose exec python /client/client.py set foo bar
+ setf foo to bar
+
+ # get a key
+ $ docker-compose exec python /client/client.py get foo
+ bar
diff --git a/docs/install/sandboxes/local_docker_build.rst b/docs/install/sandboxes/local_docker_build.rst
new file mode 100644
index 0000000000000..bffe398238350
--- /dev/null
+++ b/docs/install/sandboxes/local_docker_build.rst
@@ -0,0 +1,35 @@
+.. _install_sandboxes_local_docker_build:
+
+Building an Envoy Docker image
+==============================
+
+The following steps guide you through building your own Envoy binary, and
+putting that in a clean Ubuntu container.
+
+**Step 1: Build Envoy**
+
+Using ``lyft/envoy-build`` you will compile Envoy.
+This image has all software needed to build Envoy. From your Envoy directory::
+
+ $ pwd
+ src/envoy
+ $ ./ci/run_envoy_docker.sh './ci/do_ci.sh bazel.release'
+
+That command will take some time to run because it is compiling an Envoy binary and running tests.
+
+For more information on building and different build targets, please refer to :repo:`ci/README.md`.
+
+**Step 2: Build image with only envoy binary**
+
+In this step we'll build an image that only has the Envoy binary, and none
+of the software used to build it::
+
+ $ pwd
+ src/envoy/
+ $ docker build -f ci/Dockerfile-envoy-image -t envoy .
+
+Now you can use this ``envoy`` image to build any of the sandboxes if you change
+the ``FROM`` line in any Dockerfile.
+
+This will be particularly useful if you are interested in modifying Envoy, and testing
+your changes.
diff --git a/docs/install/sandboxes/sandboxes.rst b/docs/install/sandboxes/sandboxes.rst
new file mode 100644
index 0000000000000..03e025c2b0777
--- /dev/null
+++ b/docs/install/sandboxes/sandboxes.rst
@@ -0,0 +1,16 @@
+.. _install_sandboxes:
+
+Sandboxes
+=========
+
+The docker-compose sandboxes give you different environments to test out Envoy's
+features. As we gauge people's interests we will add more sandboxes demonstrating
+different features. The following sandboxes are available:
+
+.. toctree::
+ :maxdepth: 1
+
+ front_proxy
+ zipkin_tracing
+ grpc_bridge
+ local_docker_build
diff --git a/docs/install/sandboxes/zipkin_tracing.rst b/docs/install/sandboxes/zipkin_tracing.rst
new file mode 100644
index 0000000000000..81c93cf14357a
--- /dev/null
+++ b/docs/install/sandboxes/zipkin_tracing.rst
@@ -0,0 +1,82 @@
+.. _install_sandboxes_zipkin_tracing:
+
+Zipkin Tracing
+==============
+
+The Zipkin tracing sandbox demonstrates Envoy's :ref:`request tracing `
+capabilities using `Zipkin `_ as the tracing provider. This sandbox
+is very similar to the :ref:`front proxy sandbox <install_sandboxes_front_proxy>`, with one difference:
+service1 makes an API call to service2 before returning a response.
+The three containers will be deployed inside a virtual network called ``envoymesh``.
+
+All incoming requests are routed via the front envoy, which is acting as a reverse proxy
+sitting on the edge of the ``envoymesh`` network. Port ``80`` is mapped to port ``8000``
+by docker compose (see :repo:`/examples/zipkin-tracing/docker-compose.yml`). Notice that
+all envoys are configured to collect request traces (e.g., http_connection_manager/config/tracing setup in
+:repo:`/examples/zipkin-tracing/front-envoy-zipkin.json`) and setup to propagate the spans generated
+by the Zipkin tracer to a Zipkin cluster (trace driver setup
+in :repo:`/examples/zipkin-tracing/front-envoy-zipkin.json`).
+
+Before routing a request to the appropriate service envoy or the application, Envoy will take
+care of generating the appropriate spans for tracing (parent/child/shared context spans).
+At a high-level, each span records the latency of upstream API calls as well as information
+needed to correlate the span with other related spans (e.g., the trace ID).
+
+One of the most important benefits of tracing from Envoy is that it will take care of
+propagating the traces to the Zipkin service cluster. However, in order to fully take advantage
+of tracing, the application has to propagate trace headers that Envoy generates, while making
+calls to other services. In the sandbox we have provided, the simple flask app
+(see trace function in :repo:`/examples/front-proxy/service.py`) acting as service1 propagates
+the trace headers while making an outbound call to service2.
+
+
+Running the Sandbox
+~~~~~~~~~~~~~~~~~~~
+
+The following documentation runs through the setup of an Envoy cluster organized
+as described in the :ref:`front proxy sandbox <install_sandboxes_front_proxy>`.
+
+**Step 1: Build the sandbox**
+
+To build this sandbox example, and start the example apps run the following commands::
+
+ $ pwd
+ envoy/examples/zipkin-tracing
+ $ docker-compose up --build -d
+ $ docker-compose ps
+ Name Command State Ports
+ -------------------------------------------------------------------------------------------------------------
+ zipkintracing_service1_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp
+ zipkintracing_service2_1 /bin/sh -c /usr/local/bin/ ... Up 80/tcp
+ zipkintracing_front-envoy_1 /bin/sh -c /usr/local/bin/ ... Up 0.0.0.0:8000->80/tcp, 0.0.0.0:8001->8001/tcp
+
+**Step 2: Generate some load**
+
+You can now send a request to service1 via the front-envoy as follows::
+
+ $ curl -v $(docker-machine ip default):8000/trace/1
+ * Trying 192.168.99.100...
+ * Connected to 192.168.99.100 (192.168.99.100) port 8000 (#0)
+ > GET /trace/1 HTTP/1.1
+ > Host: 192.168.99.100:8000
+ > User-Agent: curl/7.43.0
+ > Accept: */*
+ >
+ < HTTP/1.1 200 OK
+ < content-type: text/html; charset=utf-8
+ < content-length: 89
+ < x-envoy-upstream-service-time: 1
+ < server: envoy
+ < date: Fri, 26 Aug 2016 19:39:19 GMT
+ < x-envoy-protocol-version: HTTP/1.1
+ <
+ Hello from behind Envoy (service 1)! hostname: f26027f1ce28 resolvedhostname: 172.19.0.6
+ * Connection #0 to host 192.168.99.100 left intact
+
+**Step 3: View the traces in Zipkin UI**
+
+Point your browser to http://localhost:9411 . You should see the Zipkin dashboard.
+Set the service to "front-proxy" and set the start time to a few minutes before
+the start of the test (step 2) and hit enter. You should see traces from the front-proxy.
+Click on a trace to explore the path taken by the request from front-proxy to service1
+to service2, as well as the latency incurred at each hop.
diff --git a/docs/operations/faq/overview.rst b/docs/operations/faq/overview.rst
index a95617376e035..757ffeddae796 100644
--- a/docs/operations/faq/overview.rst
+++ b/docs/operations/faq/overview.rst
@@ -9,3 +9,4 @@ The purpose of this document is to link examples of commonly used deployment sce
:maxdepth: 1
zone_aware_routing
+ zipkin_tracing
diff --git a/docs/operations/faq/zipkin_tracing.rst b/docs/operations/faq/zipkin_tracing.rst
new file mode 100644
index 0000000000000..edbc0a263527d
--- /dev/null
+++ b/docs/operations/faq/zipkin_tracing.rst
@@ -0,0 +1,7 @@
+.. _common_configuration_zipkin_tracing:
+
+Request tracing with Zipkin
+===========================
+
+Refer to the :ref:`Zipkin sandbox setup <install_sandboxes_zipkin_tracing>`
+for an example of Zipkin tracing configuration.
diff --git a/examples/BUILD b/examples/BUILD
index 23c50f75f6c1f..31b9eb72be36a 100644
--- a/examples/BUILD
+++ b/examples/BUILD
@@ -14,5 +14,8 @@ filegroup(
"front-proxy/service-envoy.json",
"grpc-bridge/config/s2s-grpc-envoy.json",
"grpc-bridge/config/s2s-python-envoy.json",
+ "zipkin-tracing/front-envoy-zipkin.json",
+ "zipkin-tracing/service1-envoy-zipkin.json",
+ "zipkin-tracing/service2-envoy-zipkin.json",
],
)
diff --git a/examples/front-proxy/Dockerfile-frontenvoy b/examples/front-proxy/Dockerfile-frontenvoy
index 3ecf7aa89b388..5fc98e5cfc1ed 100644
--- a/examples/front-proxy/Dockerfile-frontenvoy
+++ b/examples/front-proxy/Dockerfile-frontenvoy
@@ -2,4 +2,4 @@ FROM lyft/envoy:latest
RUN apt-get update && apt-get -q install -y \
curl
-CMD /usr/local/bin/envoy -c /etc/front-envoy.json
+CMD /usr/local/bin/envoy -c /etc/front-envoy.json --service-cluster front-proxy
diff --git a/examples/front-proxy/service.py b/examples/front-proxy/service.py
index b44d084a68213..003129423021b 100644
--- a/examples/front-proxy/service.py
+++ b/examples/front-proxy/service.py
@@ -1,9 +1,21 @@
from flask import Flask
+from flask import request
import socket
import os
+import sys
+import requests
app = Flask(__name__)
+TRACE_HEADERS_TO_PROPAGATE = [
+ 'X-Ot-Span-Context',
+ 'X-Request-Id',
+ 'X-B3-TraceId',
+ 'X-B3-SpanId',
+ 'X-B3-ParentSpanId',
+ 'X-B3-Sampled',
+ 'X-B3-Flags'
+]
@app.route('/service/')
def hello(service_number):
@@ -12,5 +24,19 @@ def hello(service_number):
socket.gethostname(),
socket.gethostbyname(socket.gethostname())))
+@app.route('/trace/')
+def trace(service_number):
+ headers = {}
+ # call service 2 from service 1
+ if int(os.environ['SERVICE_NAME']) == 1 :
+ for header in TRACE_HEADERS_TO_PROPAGATE:
+ if header in request.headers:
+ headers[header] = request.headers[header]
+ ret = requests.get("http://localhost:9000/trace/2", headers=headers)
+ return ('Hello from behind Envoy (service {})! hostname: {} resolved'
+ 'hostname: {}\n'.format(os.environ['SERVICE_NAME'],
+ socket.gethostname(),
+ socket.gethostbyname(socket.gethostname())))
+
if __name__ == "__main__":
app.run(host='127.0.0.1', port=8080, debug=True)
diff --git a/examples/front-proxy/start_service.sh b/examples/front-proxy/start_service.sh
index cf98f2c5b9260..d4e6ad85dc3e6 100644
--- a/examples/front-proxy/start_service.sh
+++ b/examples/front-proxy/start_service.sh
@@ -1,2 +1,3 @@
+#!/bin/bash
python /code/service.py &
-envoy -c /etc/service-envoy.json
+envoy -c /etc/service-envoy.json --service-cluster service${SERVICE_NAME}
diff --git a/examples/zipkin-tracing/README.md b/examples/zipkin-tracing/README.md
new file mode 100644
index 0000000000000..200f212150c54
--- /dev/null
+++ b/examples/zipkin-tracing/README.md
@@ -0,0 +1,2 @@
+To learn about this sandbox and for instructions on how to run it please head over
+to the [envoy docs](https://lyft.github.io/envoy/docs/install/sandboxes/zipkin_tracing.html)
diff --git a/examples/zipkin-tracing/docker-compose.yml b/examples/zipkin-tracing/docker-compose.yml
new file mode 100644
index 0000000000000..287bf7e64bc15
--- /dev/null
+++ b/examples/zipkin-tracing/docker-compose.yml
@@ -0,0 +1,61 @@
+version: '2'
+services:
+
+ front-envoy:
+ build:
+ context: ../
+ dockerfile: front-proxy/Dockerfile-frontenvoy
+ volumes:
+ - ./front-envoy-zipkin.json:/etc/front-envoy.json
+ networks:
+ - envoymesh
+ expose:
+ - "80"
+ - "8001"
+ ports:
+ - "8000:80"
+ - "8001:8001"
+
+ service1:
+ build:
+ context: ../front-proxy
+ dockerfile: Dockerfile-service
+ volumes:
+ - ./service1-envoy-zipkin.json:/etc/service-envoy.json
+ networks:
+ envoymesh:
+ aliases:
+ - service1
+ environment:
+ - SERVICE_NAME=1
+ expose:
+ - "80"
+
+ service2:
+ build:
+ context: ../front-proxy
+ dockerfile: Dockerfile-service
+ volumes:
+ - ./service2-envoy-zipkin.json:/etc/service-envoy.json
+ networks:
+ envoymesh:
+ aliases:
+ - service2
+ environment:
+ - SERVICE_NAME=2
+ expose:
+ - "80"
+
+ zipkin:
+ image: openzipkin/zipkin
+ networks:
+ envoymesh:
+ aliases:
+ - zipkin
+ expose:
+ - "9411"
+ ports:
+ - "9411:9411"
+
+networks:
+ envoymesh: {}
diff --git a/examples/zipkin-tracing/front-envoy-zipkin.json b/examples/zipkin-tracing/front-envoy-zipkin.json
new file mode 100644
index 0000000000000..9a59280983ab2
--- /dev/null
+++ b/examples/zipkin-tracing/front-envoy-zipkin.json
@@ -0,0 +1,85 @@
+{
+ "listeners": [
+ {
+ "address": "tcp://0.0.0.0:80",
+ "filters": [
+ {
+ "type": "read",
+ "name": "http_connection_manager",
+ "config": {
+ "generate_request_id": true,
+ "tracing": {
+ "operation_name": "ingress"
+ },
+ "codec_type": "auto",
+ "stat_prefix": "ingress_http",
+ "route_config": {
+ "virtual_hosts": [
+ {
+ "name": "backend",
+ "domains": ["*"],
+ "routes": [
+ {
+ "timeout_ms": 0,
+ "prefix": "/trace/1",
+ "cluster": "service1"
+ }
+ ]
+ }
+ ]
+ },
+ "filters": [
+ {
+ "type": "decoder",
+ "name": "router",
+ "config": {}
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "tracing": {
+ "http": {
+ "driver": {
+ "type": "zipkin",
+ "config": {
+ "collector_cluster": "zipkin",
+ "collector_endpoint": "/api/v1/spans"
+ }
+ }
+ }
+ },
+ "admin": {
+ "access_log_path": "/dev/null",
+ "address": "tcp://0.0.0.0:8001"
+ },
+ "cluster_manager": {
+ "clusters": [
+ {
+ "name": "service1",
+ "connect_timeout_ms": 250,
+ "type": "strict_dns",
+ "lb_type": "round_robin",
+ "features": "http2",
+ "hosts": [
+ {
+ "url": "tcp://service1:80"
+ }
+ ]
+ },
+ {
+ "name": "zipkin",
+ "connect_timeout_ms": 1000,
+ "type": "strict_dns",
+ "lb_type": "round_robin",
+ "hosts": [
+ {
+ "url": "tcp://zipkin:9411"
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/examples/zipkin-tracing/service1-envoy-zipkin.json b/examples/zipkin-tracing/service1-envoy-zipkin.json
new file mode 100644
index 0000000000000..13ed9f7087a95
--- /dev/null
+++ b/examples/zipkin-tracing/service1-envoy-zipkin.json
@@ -0,0 +1,133 @@
+{
+ "listeners": [
+ {
+ "address": "tcp://0.0.0.0:80",
+ "filters": [
+ {
+ "type": "read",
+ "name": "http_connection_manager",
+ "config": {
+ "tracing": {
+ "operation_name": "ingress"
+ },
+ "codec_type": "auto",
+ "stat_prefix": "ingress_http",
+ "route_config": {
+ "virtual_hosts": [
+ {
+ "name": "service1",
+ "domains": ["*"],
+ "routes": [
+ {
+ "timeout_ms": 0,
+ "prefix": "/",
+ "cluster": "local_service"
+ }
+ ]
+ }
+ ]
+ },
+ "filters": [
+ {
+ "type": "decoder",
+ "name": "router",
+ "config": {}
+ }
+ ]
+ }
+ }
+ ]
+ },
+ {
+ "address": "tcp://0.0.0.0:9000",
+ "filters": [
+ {
+ "type": "read",
+ "name": "http_connection_manager",
+ "config": {
+ "tracing": {
+ "operation_name": "egress"
+ },
+ "codec_type": "auto",
+ "stat_prefix": "egress_http",
+ "route_config": {
+ "virtual_hosts": [
+ {
+ "name": "service2",
+ "domains": ["*"],
+ "routes": [
+ {
+ "timeout_ms": 0,
+ "prefix": "/trace/2",
+ "cluster": "service2"
+ }
+ ]
+ }
+ ]
+ },
+ "filters": [
+ {
+ "type": "decoder",
+ "name": "router",
+ "config": {}
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "tracing": {
+ "http": {
+ "driver": {
+ "type": "zipkin",
+ "config": {
+ "collector_cluster": "zipkin",
+ "collector_endpoint": "/api/v1/spans"
+ }
+ }
+ }
+ },
+ "admin": {
+ "access_log_path": "/dev/null",
+ "address": "tcp://0.0.0.0:8001"
+ },
+ "cluster_manager": {
+ "clusters": [
+ {
+ "name": "local_service",
+ "connect_timeout_ms": 250,
+ "type": "strict_dns",
+ "lb_type": "round_robin",
+ "hosts": [
+ {
+ "url": "tcp://127.0.0.1:8080"
+ }
+ ]
+ },
+ {
+ "name": "service2",
+ "connect_timeout_ms": 250,
+ "type": "strict_dns",
+ "lb_type": "round_robin",
+ "features": "http2",
+ "hosts": [
+ {
+ "url": "tcp://service2:80"
+ }
+ ]
+ },
+ {
+ "name": "zipkin",
+ "connect_timeout_ms": 1000,
+ "type": "strict_dns",
+ "lb_type": "round_robin",
+ "hosts": [
+ {
+ "url": "tcp://zipkin:9411"
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/examples/zipkin-tracing/service2-envoy-zipkin.json b/examples/zipkin-tracing/service2-envoy-zipkin.json
new file mode 100644
index 0000000000000..f22c4e9614d86
--- /dev/null
+++ b/examples/zipkin-tracing/service2-envoy-zipkin.json
@@ -0,0 +1,83 @@
+{
+ "listeners": [
+ {
+ "address": "tcp://0.0.0.0:80",
+ "filters": [
+ {
+ "type": "read",
+ "name": "http_connection_manager",
+ "config": {
+ "tracing": {
+ "operation_name": "ingress"
+ },
+ "codec_type": "auto",
+ "stat_prefix": "ingress_http",
+ "route_config": {
+ "virtual_hosts": [
+ {
+ "name": "service2",
+ "domains": ["*"],
+ "routes": [
+ {
+ "timeout_ms": 0,
+ "prefix": "/",
+ "cluster": "local_service"
+ }
+ ]
+ }
+ ]
+ },
+ "filters": [
+ {
+ "type": "decoder",
+ "name": "router",
+ "config": {}
+ }
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "tracing": {
+ "http": {
+ "driver": {
+ "type": "zipkin",
+ "config": {
+ "collector_cluster": "zipkin",
+ "collector_endpoint": "/api/v1/spans"
+ }
+ }
+ }
+ },
+ "admin": {
+ "access_log_path": "/dev/null",
+ "address": "tcp://0.0.0.0:8001"
+ },
+ "cluster_manager": {
+ "clusters": [
+ {
+ "name": "local_service",
+ "connect_timeout_ms": 250,
+ "type": "strict_dns",
+ "lb_type": "round_robin",
+ "hosts": [
+ {
+ "url": "tcp://127.0.0.1:8080"
+ }
+ ]
+ },
+ {
+ "name": "zipkin",
+ "connect_timeout_ms": 1000,
+ "type": "strict_dns",
+ "lb_type": "round_robin",
+ "hosts": [
+ {
+ "url": "tcp://zipkin:9411"
+ }
+ ]
+ }
+ ]
+ }
+}
diff --git a/test/config_test/example_configs_test.cc b/test/config_test/example_configs_test.cc
index ea2861094a1fa..72de448954595 100644
--- a/test/config_test/example_configs_test.cc
+++ b/test/config_test/example_configs_test.cc
@@ -8,6 +8,6 @@ namespace Envoy {
TEST(ExampleConfigsTest, All) {
TestEnvironment::exec(
{TestEnvironment::runfilesPath("test/config_test/example_configs_test_setup.sh")});
- EXPECT_EQ(8UL, ConfigTest::run(TestEnvironment::temporaryDirectory() + "/test/config_test"));
+ EXPECT_EQ(11UL, ConfigTest::run(TestEnvironment::temporaryDirectory() + "/test/config_test"));
}
} // Envoy