diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 423396ea8d7..59c825e7e28 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -4,7 +4,12 @@ on: push: branches: - master - + paths: + - 'docs/**' + - 'ext/**' + - 'opentelemetry-python/opentelemetry-api/src/opentelemetry/**' + - 'opentelemetry-python/opentelemetry-sdk/src/opentelemetry/sdk/**' + jobs: docs: runs-on: ubuntu-latest diff --git a/.travis.yml b/.travis.yml index 223cfbb587f..67e3a58da13 100644 --- a/.travis.yml +++ b/.travis.yml @@ -8,12 +8,15 @@ python: - '3.6' - '3.7' - '3.8' - - 'pypy3.5' + - 'pypy3' #matrix: # allow_failures: # - python: '3.8-dev' +services: + - docker + install: - pip install tox-travis diff --git a/README.md b/README.md index a8422735ef4..05f4df29bcc 100644 --- a/README.md +++ b/README.md @@ -117,9 +117,12 @@ Meeting notes are available as a public [Google doc](https://docs.google.com/doc Approvers ([@open-telemetry/python-approvers](https://github.com/orgs/open-telemetry/teams/python-approvers)): +- [Alex Boten](https://github.com/codeboten), LightStep - [Carlos Alberto Cortez](https://github.com/carlosalberto), LightStep - [Christian Neumüller](https://github.com/Oberon00), Dynatrace +- [Hector Hernandez](https://github.com/hectorhdzg), Microsoft - [Leighton Chen](https://github.com/lzchen), Microsoft +- [Mauricio Vásquez](https://github.com/mauriciovasquezbernal), Kinvolk - [Reiley Yang](https://github.com/reyang), Microsoft *Find more about the approver role in [community repository](https://github.com/open-telemetry/community/blob/master/community-membership.md#approver).* @@ -176,13 +179,13 @@ estimates, and subject to change. 
Future releases targets include: -| Component | Version | Target Date | -| ----------------------------------- | ---------- | ---------------- | -| Zipkin Trace Exporter | Alpha v0.4 | January 28 2020 | -| W3C Correlation Context Propagation | Alpha v0.4 | January 28 2020 | -| Support for Tags/Baggage | Alpha v0.4 | January 28 2020 | -| Metrics Aggregation | Alpha v0.4 | January 28 2020 | -| gRPC Integrations | Alpha v0.4 | January 28 2020 | -| Prometheus Metrics Exporter | Alpha v0.4 | January 28 2020 | -| OpenCensus Bridge | Alpha v0.4 | January 28 2020 | -| Metrics SDK (Complete) | Alpha v0.4 | January 28 2020 | +| Component | Version | Target Date | +| ----------------------------------- | ---------- | ----------------- | +| Zipkin Trace Exporter | Alpha v0.4 | February 21 2020 | +| W3C Correlation Context Propagation | Alpha v0.4 | February 21 2020 | +| Support for Tags/Baggage | Alpha v0.4 | February 21 2020 | +| Metrics Aggregation | Alpha v0.4 | February 21 2020 | +| gRPC Integrations | Alpha v0.4 | February 21 2020 | +| Prometheus Metrics Exporter | Alpha v0.4 | February 21 2020 | +| OpenCensus Bridge | Alpha v0.4 | February 21 2020 | +| Metrics SDK (Complete) | Alpha v0.4 | February 21 2020 | diff --git a/docs/opentelemetry.context.base_context.rst b/docs/opentelemetry.context.context.rst similarity index 73% rename from docs/opentelemetry.context.base_context.rst rename to docs/opentelemetry.context.context.rst index ac28d40008e..331557d2dde 100644 --- a/docs/opentelemetry.context.base_context.rst +++ b/docs/opentelemetry.context.context.rst @@ -1,7 +1,7 @@ opentelemetry.context.base\_context module ========================================== -.. automodule:: opentelemetry.context.base_context +.. 
automodule:: opentelemetry.context.context :members: :undoc-members: :show-inheritance: diff --git a/docs/opentelemetry.context.rst b/docs/opentelemetry.context.rst index 7bc738a0500..2b25793458c 100644 --- a/docs/opentelemetry.context.rst +++ b/docs/opentelemetry.context.rst @@ -6,7 +6,7 @@ Submodules .. toctree:: - opentelemetry.context.base_context + opentelemetry.context.context Module contents --------------- diff --git a/docs/opentelemetry.sdk.metrics.export.aggregate.rst b/docs/opentelemetry.sdk.metrics.export.aggregate.rst new file mode 100644 index 00000000000..7c9306c6846 --- /dev/null +++ b/docs/opentelemetry.sdk.metrics.export.aggregate.rst @@ -0,0 +1,7 @@ +opentelemetry.sdk.metrics.export.aggregate +========================================== + +.. automodule:: opentelemetry.sdk.metrics.export.aggregate + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/opentelemetry.sdk.metrics.export.batcher.rst b/docs/opentelemetry.sdk.metrics.export.batcher.rst new file mode 100644 index 00000000000..5dbd1d6e582 --- /dev/null +++ b/docs/opentelemetry.sdk.metrics.export.batcher.rst @@ -0,0 +1,11 @@ +opentelemetry.sdk.metrics.export.batcher +========================================== + +.. toctree:: + + opentelemetry.sdk.metrics.export + +.. automodule:: opentelemetry.sdk.metrics.export.batcher + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/opentelemetry.sdk.metrics.export.rst b/docs/opentelemetry.sdk.metrics.export.rst new file mode 100644 index 00000000000..1ae51170e4f --- /dev/null +++ b/docs/opentelemetry.sdk.metrics.export.rst @@ -0,0 +1,7 @@ +opentelemetry.sdk.metrics.export +========================================== + +.. 
automodule:: opentelemetry.sdk.metrics.export + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/opentelemetry.sdk.metrics.rst b/docs/opentelemetry.sdk.metrics.rst index 6d646c3b15f..ec8687dd2dc 100644 --- a/docs/opentelemetry.sdk.metrics.rst +++ b/docs/opentelemetry.sdk.metrics.rst @@ -1,6 +1,14 @@ opentelemetry.sdk.metrics package ========================================== +Submodules +---------- + +.. toctree:: + + opentelemetry.sdk.metrics.export.aggregate + opentelemetry.sdk.metrics.export.batcher + .. automodule:: opentelemetry.sdk.metrics :members: :undoc-members: diff --git a/examples/metrics/record.py b/examples/metrics/record.py new file mode 100644 index 00000000000..be68c8083ff --- /dev/null +++ b/examples/metrics/record.py @@ -0,0 +1,69 @@ +# Copyright 2019, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +This module serves as an example for a simple application using metrics. +It demonstrates the different ways you can record metrics via the meter. 
+""" +import time + +from opentelemetry import metrics +from opentelemetry.sdk.metrics import Counter, Meter +from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter +from opentelemetry.sdk.metrics.export.controller import PushController + +# Meter is responsible for creating and recording metrics +metrics.set_preferred_meter_implementation(lambda _: Meter()) +meter = metrics.meter() +# exporter to export metrics to the console +exporter = ConsoleMetricsExporter() +# controller collects metrics created from meter and exports it via the +# exporter every interval +controller = PushController(meter, exporter, 5) + +# Example to show how to record using the meter +counter = meter.create_metric( + "requests", "number of requests", 1, int, Counter, ("environment",) +) + +counter2 = meter.create_metric( + "clicks", "number of clicks", 1, int, Counter, ("environment",) +) + +# Labelsets are used to identify key-values that are associated with a specific +# metric that you want to record. These are useful for pre-aggregation and can +# be used to store custom dimensions pertaining to a metric + +# The meter takes a dictionary of key value pairs +label_set = meter.get_label_set({"environment": "staging"}) + +# Handle usage +# You can record metrics with metric handles. Handles are created by passing in +# a labelset. A handle is essentially metric data that corresponds to a specific +# set of labels. Therefore, getting a handle using the same set of labels will +# yield the same metric handle. +counter_handle = counter.get_handle(label_set) +counter_handle.add(100) + +# Direct metric usage +# You can record metrics directly using the metric instrument. You pass in a +# labelset that you would like to record for. +counter.add(25, label_set) + +# Record batch usage +# You can record metrics in a batch by passing in a labelset and a sequence of +# (metric, value) pairs. The value would be recorded for each metric using the +# specified labelset for each. 
+meter.record_batch(label_set, [(counter, 50), (counter2, 70)]) +time.sleep(100) diff --git a/examples/metrics/stateful.py b/examples/metrics/stateful.py new file mode 100644 index 00000000000..c43f795e228 --- /dev/null +++ b/examples/metrics/stateful.py @@ -0,0 +1,72 @@ +# Copyright 2019, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +This module serves as an example for a simple application using metrics +Examples show how to recording affects the collection of metrics to be exported +""" +import time + +from opentelemetry import metrics +from opentelemetry.sdk.metrics import Counter, Meter +from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter +from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher +from opentelemetry.sdk.metrics.export.controller import PushController + +# Batcher used to collect all created metrics from meter ready for exporting +# Pass in true/false to indicate whether the batcher is stateful. True +# indicates the batcher computes checkpoints from over the process lifetime. 
+# False indicates the batcher computes checkpoints which describe the updates +# of a single collection period (deltas) +batcher = UngroupedBatcher(True) +# If a batcher is not provided, a default batcher is used +# Meter is responsible for creating and recording metrics +metrics.set_preferred_meter_implementation(lambda _: Meter(batcher)) +meter = metrics.meter() +# exporter to export metrics to the console +exporter = ConsoleMetricsExporter() +# controller collects metrics created from meter and exports it via the +# exporter every interval +controller = PushController(meter, exporter, 5) + +counter = meter.create_metric( + "requests", "number of requests", 1, int, Counter, ("environment",) +) + +counter2 = meter.create_metric( + "clicks", "number of clicks", 1, int, Counter, ("environment",) +) + +# Labelsets are used to identify key-values that are associated with a specific +# metric that you want to record. These are useful for pre-aggregation and can +# be used to store custom dimensions pertaining to a metric +label_set = meter.get_label_set({"environment": "staging"}) +label_set2 = meter.get_label_set({"environment": "testing"}) + +counter.add(25, label_set) +# We sleep for 5 seconds, exported value should be 25 +time.sleep(5) + +counter.add(50, label_set) +# exported value should be 75 +time.sleep(5) + +counter.add(35, label_set2) +# should be two exported values 75 and 35, one for each labelset +time.sleep(5) + +counter2.add(5, label_set) +# should be three exported values, labelsets can be reused for different +# metrics but will be recorded separately, 75, 35 and 5 +time.sleep(5) diff --git a/examples/metrics/stateless.py b/examples/metrics/stateless.py new file mode 100644 index 00000000000..69213cbddd3 --- /dev/null +++ b/examples/metrics/stateless.py @@ -0,0 +1,57 @@ +# Copyright 2019, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +""" +This module serves as an example for a simple application using metrics +Examples show how recording affects the collection of metrics to be exported +""" +import time + +from opentelemetry import metrics +from opentelemetry.sdk.metrics import Counter, Meter +from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter +from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher +from opentelemetry.sdk.metrics.export.controller import PushController + +# Batcher used to collect all created metrics from meter ready for exporting +# Pass in false for non-stateful batcher. Indicates the batcher computes +# checkpoints which describe the updates of a single collection period (deltas) +batcher = UngroupedBatcher(False) +# Meter is responsible for creating and recording metrics +metrics.set_preferred_meter_implementation(lambda _: Meter(batcher)) +meter = metrics.meter() +# exporter to export metrics to the console +exporter = ConsoleMetricsExporter() +# controller collects metrics created from meter and exports it via the +# exporter every interval +controller = PushController(meter, exporter, 5) + +counter = meter.create_metric( + "requests", "number of requests", 1, int, Counter, ("environment",) +) + +# Labelsets are used to identify key-values that are associated with a specific +# metric that you want to record. 
These are useful for pre-aggregation and can +# be used to store custom dimensions pertaining to a metric +label_set = meter.get_label_set({"environment": "staging"}) + +counter.add(25, label_set) +# We sleep for 5 seconds, exported value should be 25 +time.sleep(5) + +counter.add(50, label_set) +# exported value should be 50 due to non-stateful batcher +time.sleep(20) + +# Following exported values would be 0 diff --git a/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py b/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py index ae484dd30e2..f7a9872b6b6 100644 --- a/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py +++ b/examples/opentelemetry-example-app/src/opentelemetry_example_app/flask_example.py @@ -21,9 +21,8 @@ import requests import opentelemetry.ext.http_requests -from opentelemetry import propagators, trace +from opentelemetry import trace from opentelemetry.ext.flask import instrument_app -from opentelemetry.sdk.context.propagation.b3_format import B3Format from opentelemetry.sdk.trace import TracerSource @@ -43,17 +42,14 @@ def configure_opentelemetry(flask_app: flask.Flask): """ # Start by configuring all objects required to ensure # a complete end to end workflow. - # the preferred implementation of these objects must be set, + # The preferred implementation of these objects must be set, # as the opentelemetry-api defines the interface with a no-op # implementation. trace.set_preferred_tracer_source_implementation(lambda _: TracerSource()) + # Next, we need to configure how the values that are used by # traces and metrics are propagated (such as what specific headers # carry this value). - - # TBD: can remove once default TraceContext propagators are installed. 
- propagators.set_global_httptextformat(B3Format()) - # Integrations are the glue that binds the OpenTelemetry API # and the frameworks and libraries that are used together, automatically # creating Spans and propagating context as appropriate. diff --git a/examples/opentelemetry-example-app/src/opentelemetry_example_app/metrics_example.py b/examples/opentelemetry-example-app/src/opentelemetry_example_app/metrics_example.py index 246d6c3507d..2f423619021 100644 --- a/examples/opentelemetry-example-app/src/opentelemetry_example_app/metrics_example.py +++ b/examples/opentelemetry-example-app/src/opentelemetry_example_app/metrics_example.py @@ -18,8 +18,12 @@ from opentelemetry import metrics from opentelemetry.sdk.metrics import Counter, Meter +from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter +from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher +from opentelemetry.sdk.metrics.export.controller import PushController -metrics.set_preferred_meter_implementation(lambda _: Meter()) +batcher = UngroupedBatcher(True) +metrics.set_preferred_meter_implementation(lambda _: Meter(batcher)) meter = metrics.meter() counter = meter.create_metric( "available memory", @@ -33,7 +37,7 @@ label_set = meter.get_label_set({"environment": "staging"}) # Direct metric usage -counter.add(label_set, 25) +counter.add(25, label_set) # Handle usage counter_handle = counter.get_handle(label_set) @@ -41,6 +45,6 @@ # Record batch usage meter.record_batch(label_set, [(counter, 50)]) -print(counter_handle.data) -# TODO: exporters +exporter = ConsoleMetricsExporter() +controller = PushController(meter, exporter, 5) diff --git a/examples/opentelemetry-example-app/tests/test_flask_example.py b/examples/opentelemetry-example-app/tests/test_flask_example.py index fd0b89e98c3..69be9e4bfc7 100644 --- a/examples/opentelemetry-example-app/tests/test_flask_example.py +++ b/examples/opentelemetry-example-app/tests/test_flask_example.py @@ -20,8 +20,8 @@ from 
werkzeug.wrappers import BaseResponse import opentelemetry_example_app.flask_example as flask_example -from opentelemetry.sdk import trace -from opentelemetry.sdk.context.propagation import b3_format +from opentelemetry import trace +from opentelemetry.sdk import trace as trace_sdk class TestFlaskExample(unittest.TestCase): @@ -46,7 +46,7 @@ def tearDown(self): self.send_patcher.stop() def test_full_path(self): - trace_id = trace.generate_trace_id() + trace_id = trace_sdk.generate_trace_id() # We need to use the Werkzeug test app because # The headers are injected at the wsgi layer. # The flask test app will not include these, and @@ -56,18 +56,17 @@ def test_full_path(self): client.get( "/", headers={ - "x-b3-traceid": b3_format.format_trace_id(trace_id), - "x-b3-spanid": b3_format.format_span_id( - trace.generate_span_id() - ), - "x-b3-sampled": "1", + "traceparent": "00-{:032x}-{:016x}-{:02x}".format( + trace_id, + trace_sdk.generate_span_id(), + trace.TraceOptions.SAMPLED, + ) }, ) # assert the http request header was propagated through. prepared_request = self.send.call_args[0][1] headers = prepared_request.headers - for required_header in {"x-b3-traceid", "x-b3-spanid", "x-b3-sampled"}: - self.assertIn(required_header, headers) - self.assertEqual( - headers["x-b3-traceid"], b3_format.format_trace_id(trace_id) + self.assertRegex( + headers["traceparent"], + r"00-{:032x}-[0-9a-f]{{16}}-01".format(trace_id), ) diff --git a/examples/opentracing/README.md b/examples/opentracing/README.md new file mode 100644 index 00000000000..2f7a9264179 --- /dev/null +++ b/examples/opentracing/README.md @@ -0,0 +1,90 @@ +# Overview + +This example shows how to use the [`opentelemetry-ext-opentracing-shim` +package](https://github.com/open-telemetry/opentelemetry-python/tree/master/ext/opentelemetry-ext-opentracing-shim) +to interact with libraries instrumented with +[`opentracing-python`](https://github.com/opentracing/opentracing-python). 
+ +The included `rediscache` library creates spans via the OpenTracing Redis +integration, +[`redis_opentracing`](https://github.com/opentracing-contrib/python-redis). +Spans are exported via the Jaeger exporter, which is attached to the +OpenTelemetry tracer. + +## Installation + +### Jaeger + +Install and run +[Jaeger](https://www.jaegertracing.io/docs/latest/getting-started/#all-in-one). +See the [basic tracer +example](https://github.com/open-telemetry/opentelemetry-python/tree/master/examples/basic-tracer) +for more detail. + +### Redis + +Install Redis following the [instructions](https://redis.io/topics/quickstart). + +Make sure that the Redis server is running by executing this: + +```sh +$ redis-server +``` + +### Python Dependencies + +Install the Python dependencies in [`requirements.txt`](requirements.txt): + +```sh +$ pip install -r requirements.txt +``` + +Alternatively, you can install the Python dependencies separately: + +```sh +$ pip install \ + opentelemetry-api \ + opentelemetry-sdk \ + opentelemetry-ext-jaeger \ + opentelemetry-opentracing-shim \ + redis \ + redis_opentracing +``` + +## Run the Application + +The example script calculates a few Fibonacci numbers and stores the results in +Redis. The script, the `rediscache` library, and the OpenTracing Redis +integration all contribute spans to the trace. + +To run the script: + +```sh +$ python main.py +``` + +After running, you can view the generated trace in the Jaeger UI. + +#### Jaeger UI + +Open the Jaeger UI in your browser at + and view traces for the +"OpenTracing Shim Example" service. + +Each `main.py` run should generate a trace, and each trace should include +multiple spans that represent calls to Redis. + +

+ +Note that tags and logs (OpenTracing) and attributes and events (OpenTelemetry) +from both tracing systems appear in the exported trace. + +

+ +## Useful links +- For more information on OpenTelemetry, visit: +- For more information on tracing in Python, visit: + +## LICENSE + +Apache License 2.0 diff --git a/examples/opentracing/__init__.py b/examples/opentracing/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/examples/opentracing/images/jaeger-span-expanded.png b/examples/opentracing/images/jaeger-span-expanded.png new file mode 100644 index 00000000000..6da4b4b014d Binary files /dev/null and b/examples/opentracing/images/jaeger-span-expanded.png differ diff --git a/examples/opentracing/images/jaeger-trace-full.png b/examples/opentracing/images/jaeger-trace-full.png new file mode 100644 index 00000000000..c28255bd130 Binary files /dev/null and b/examples/opentracing/images/jaeger-trace-full.png differ diff --git a/examples/opentracing/main.py b/examples/opentracing/main.py new file mode 100755 index 00000000000..81d23f10e46 --- /dev/null +++ b/examples/opentracing/main.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +from opentelemetry import trace +from opentelemetry.ext import opentracing_shim +from opentelemetry.ext.jaeger import JaegerSpanExporter +from opentelemetry.sdk.trace import TracerSource +from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor +from rediscache import RedisCache + +# Configure the tracer using the default implementation +trace.set_preferred_tracer_source_implementation(lambda T: TracerSource()) +tracer_source = trace.tracer_source() + +# Configure the tracer to export traces to Jaeger +jaeger_exporter = JaegerSpanExporter( + service_name="OpenTracing Shim Example", + agent_host_name="localhost", + agent_port=6831, +) +span_processor = SimpleExportSpanProcessor(jaeger_exporter) +tracer_source.add_span_processor(span_processor) + +# Create an OpenTracing shim. This implements the OpenTracing tracer API, but +# forwards calls to the underlying OpenTelemetry tracer. 
+opentracing_tracer = opentracing_shim.create_tracer(tracer_source) + +# Our example caching library expects an OpenTracing-compliant tracer. +redis_cache = RedisCache(opentracing_tracer) + +# Application code uses an OpenTelemetry Tracer as usual. +tracer = trace.tracer_source().get_tracer(__name__) + + +@redis_cache +def fib(number): + """Get the Nth Fibonacci number, cache intermediate results in Redis.""" + if number < 0: + raise ValueError + if number in (0, 1): + return number + return fib(number - 1) + fib(number - 2) + + +with tracer.start_as_current_span("Fibonacci") as span: + span.set_attribute("is_example", "yes :)") + fib(4) diff --git a/examples/opentracing/rediscache.py b/examples/opentracing/rediscache.py new file mode 100644 index 00000000000..6d9cd21662f --- /dev/null +++ b/examples/opentracing/rediscache.py @@ -0,0 +1,61 @@ +""" +This is an example of a library written to work with opentracing-python. It +provides a simple caching decorator backed by Redis, and uses the OpenTracing +Redis integration to automatically generate spans for each call to Redis. +""" + +import pickle +from functools import wraps + +# FIXME The pylint disablings are needed here because the code of this +# example is being executed against the tox.ini of the main +# opentelemetry-python project. Find a way to separate the two. +import redis  # pylint: disable=import-error +import redis_opentracing  # pylint: disable=import-error + + +class RedisCache: + """Redis-backed caching decorator, using OpenTracing! + + Args: + tracer: an opentracing.tracer.Tracer + """ + + def __init__(self, tracer): + redis_opentracing.init_tracing(tracer) + self.tracer = tracer + self.client = redis.StrictRedis() + + def __call__(self, func): + @wraps(func) + def inner(*args, **kwargs): + with self.tracer.start_active_span("Caching decorator") as scope1: + + # Pickle the call args to get a canonical key. Don't do this in + # prod! 
+ key = pickle.dumps((func.__qualname__, args, kwargs)) + + pval = self.client.get(key) + if pval is not None: + val = pickle.loads(pval) + scope1.span.log_kv( + {"msg": "Found cached value", "val": val} + ) + return val + + scope1.span.log_kv({"msg": "Cache miss, calling function"}) + with self.tracer.start_active_span( + 'Call "{}"'.format(func.__name__) + ) as scope2: + scope2.span.set_tag("func", func.__name__) + scope2.span.set_tag("args", str(args)) + scope2.span.set_tag("kwargs", str(kwargs)) + + val = func(*args, **kwargs) + scope2.span.set_tag("val", str(val)) + + # Let keys expire after 10 seconds + self.client.setex(key, 10, pickle.dumps(val)) + return val + + return inner diff --git a/examples/opentracing/requirements.txt b/examples/opentracing/requirements.txt new file mode 100644 index 00000000000..d87842f4d89 --- /dev/null +++ b/examples/opentracing/requirements.txt @@ -0,0 +1,6 @@ +opentelemetry-api +opentelemetry-sdk +opentelemetry-ext-jaeger +opentelemetry-opentracing-shim +redis +redis_opentracing diff --git a/ext/opentelemetry-ext-dbapi/README.rst b/ext/opentelemetry-ext-dbapi/README.rst index f3eb9b241c9..3618453823d 100644 --- a/ext/opentelemetry-ext-dbapi/README.rst +++ b/ext/opentelemetry-ext-dbapi/README.rst @@ -14,7 +14,8 @@ Usage from opentelemetry.trace import tracer_source from opentelemetry.ext.dbapi import trace_integration - + trace.set_preferred_tracer_source_implementation(lambda T: TracerSource()) + tracer = trace.tracer_source().get_tracer(__name__) # Ex: mysql.connector trace_integration(tracer_source(), mysql.connector, "connect", "mysql") diff --git a/ext/opentelemetry-ext-dbapi/src/opentelemetry/ext/dbapi/__init__.py b/ext/opentelemetry-ext-dbapi/src/opentelemetry/ext/dbapi/__init__.py index 7ba1de1795a..88e9d3a0b12 100644 --- a/ext/opentelemetry-ext-dbapi/src/opentelemetry/ext/dbapi/__init__.py +++ b/ext/opentelemetry-ext-dbapi/src/opentelemetry/ext/dbapi/__init__.py @@ -18,6 +18,7 @@ 
https://www.python.org/dev/peps/pep-0249/ """ +import functools import logging import typing @@ -72,7 +73,6 @@ def wrap_connect( class DatabaseApiIntegration: - # pylint: disable=unused-argument def __init__( self, tracer: Tracer, @@ -80,8 +80,6 @@ def __init__( database_type: str = "sql", connection_attributes=None, ): - if tracer is None: - raise ValueError("The tracer is not provided.") self.connection_attributes = connection_attributes if self.connection_attributes is None: self.connection_attributes = { @@ -107,18 +105,40 @@ def wrapped_connection( """Add object proxy to connection object. """ connection = connect_method(*args, **kwargs) + self.get_connection_attributes(connection) + traced_connection = TracedConnectionProxy(connection, self) + return traced_connection + def get_connection_attributes(self, connection): + # Populate span fields using connection for key, value in self.connection_attributes.items(): - attribute = getattr(connection, value, None) + # Allow attributes nested in connection object + attribute = functools.reduce( + lambda attribute, attribute_value: getattr( + attribute, attribute_value, None + ), + value.split("."), + connection, + ) if attribute: self.connection_props[key] = attribute - traced_connection = TracedConnection(connection, self) - return traced_connection + self.name = self.database_component + self.database = self.connection_props.get("database", "") + if self.database: + self.name += "." 
+ self.database + user = self.connection_props.get("user") + if user is not None: + self.span_attributes["db.user"] = user + host = self.connection_props.get("host") + if host is not None: + self.span_attributes["net.peer.name"] = host + port = self.connection_props.get("port") + if port is not None: + self.span_attributes["net.peer.port"] = port # pylint: disable=abstract-method -class TracedConnection(wrapt.ObjectProxy): - +class TracedConnectionProxy(wrapt.ObjectProxy): # pylint: disable=unused-argument def __init__( self, @@ -130,62 +150,17 @@ def __init__( wrapt.ObjectProxy.__init__(self, connection) self._db_api_integration = db_api_integration - self._db_api_integration.name = ( - self._db_api_integration.database_component - ) - self._db_api_integration.database = self._db_api_integration.connection_props.get( - "database", "" - ) - if self._db_api_integration.database: - self._db_api_integration.name += ( - "." + self._db_api_integration.database - ) - user = self._db_api_integration.connection_props.get("user") - if user is not None: - self._db_api_integration.span_attributes["db.user"] = user - host = self._db_api_integration.connection_props.get("host") - if host is not None: - self._db_api_integration.span_attributes["net.peer.name"] = host - port = self._db_api_integration.connection_props.get("port") - if port is not None: - self._db_api_integration.span_attributes["net.peer.port"] = port - def cursor(self, *args, **kwargs): - return TracedCursor( + return TracedCursorProxy( self.__wrapped__.cursor(*args, **kwargs), self._db_api_integration ) -# pylint: disable=abstract-method -class TracedCursor(wrapt.ObjectProxy): - - # pylint: disable=unused-argument - def __init__( - self, - cursor, - db_api_integration: DatabaseApiIntegration, - *args, - **kwargs - ): - wrapt.ObjectProxy.__init__(self, cursor) +class TracedCursor: + def __init__(self, db_api_integration: DatabaseApiIntegration): self._db_api_integration = db_api_integration - def execute(self, 
*args, **kwargs): - return self._traced_execution( - self.__wrapped__.execute, *args, **kwargs - ) - - def executemany(self, *args, **kwargs): - return self._traced_execution( - self.__wrapped__.executemany, *args, **kwargs - ) - - def callproc(self, *args, **kwargs): - return self._traced_execution( - self.__wrapped__.callproc, *args, **kwargs - ) - - def _traced_execution( + def traced_execution( self, query_method: typing.Callable[..., any], *args: typing.Tuple[any, any], @@ -223,3 +198,33 @@ def _traced_execution( except Exception as ex: # pylint: disable=broad-except span.set_status(Status(StatusCanonicalCode.UNKNOWN, str(ex))) raise ex + + +# pylint: disable=abstract-method +class TracedCursorProxy(wrapt.ObjectProxy): + + # pylint: disable=unused-argument + def __init__( + self, + cursor, + db_api_integration: DatabaseApiIntegration, + *args, + **kwargs + ): + wrapt.ObjectProxy.__init__(self, cursor) + self._traced_cursor = TracedCursor(db_api_integration) + + def execute(self, *args, **kwargs): + return self._traced_cursor.traced_execution( + self.__wrapped__.execute, *args, **kwargs + ) + + def executemany(self, *args, **kwargs): + return self._traced_cursor.traced_execution( + self.__wrapped__.executemany, *args, **kwargs + ) + + def callproc(self, *args, **kwargs): + return self._traced_cursor.traced_execution( + self.__wrapped__.callproc, *args, **kwargs + ) diff --git a/ext/opentelemetry-ext-docker-tests/tests/docker-compose.yml b/ext/opentelemetry-ext-docker-tests/tests/docker-compose.yml new file mode 100644 index 00000000000..1dab842f898 --- /dev/null +++ b/ext/opentelemetry-ext-docker-tests/tests/docker-compose.yml @@ -0,0 +1,7 @@ +version: '3' + +services: + otmongo: + ports: + - "27017:27017" + image: mongo:latest \ No newline at end of file diff --git a/ext/opentelemetry-ext-docker-tests/tests/pymongo/test_pymongo_functional.py b/ext/opentelemetry-ext-docker-tests/tests/pymongo/test_pymongo_functional.py new file mode 100644 index 
00000000000..4ef14fd789b --- /dev/null +++ b/ext/opentelemetry-ext-docker-tests/tests/pymongo/test_pymongo_functional.py @@ -0,0 +1,108 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import typing +import unittest + +from pymongo import MongoClient + +from opentelemetry import trace as trace_api +from opentelemetry.ext.pymongo import trace_integration +from opentelemetry.sdk.trace import Span, Tracer, TracerSource +from opentelemetry.sdk.trace.export import SimpleExportSpanProcessor +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) + +MONGODB_HOST = os.getenv("MONGODB_HOST", "localhost") +MONGODB_PORT = int(os.getenv("MONGODB_PORT", "27017")) +MONGODB_DB_NAME = os.getenv("MONGODB_DB_NAME", "opentelemetry-tests") +MONGODB_COLLECTION_NAME = "test" + + +class TestFunctionalPymongo(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls._tracer_source = TracerSource() + cls._tracer = Tracer(cls._tracer_source, None) + cls._span_exporter = InMemorySpanExporter() + cls._span_processor = SimpleExportSpanProcessor(cls._span_exporter) + cls._tracer_source.add_span_processor(cls._span_processor) + trace_integration(cls._tracer) + client = MongoClient( + MONGODB_HOST, MONGODB_PORT, serverSelectionTimeoutMS=2000 + ) + db = client[MONGODB_DB_NAME] + cls._collection = db[MONGODB_COLLECTION_NAME] + + def setUp(self): + self._span_exporter.clear() + + def 
validate_spans(self): + spans = self._span_exporter.get_finished_spans() + self.assertEqual(len(spans), 2) + for span in spans: + if span.name == "rootSpan": + root_span = span + else: + pymongo_span = span + self.assertIsInstance(span.start_time, int) + self.assertIsInstance(span.end_time, int) + self.assertIsNot(root_span, None) + self.assertIsNot(pymongo_span, None) + self.assertIsNotNone(pymongo_span.parent) + self.assertEqual(pymongo_span.parent.name, root_span.name) + self.assertIs(pymongo_span.kind, trace_api.SpanKind.CLIENT) + self.assertEqual( + pymongo_span.attributes["db.instance"], MONGODB_DB_NAME + ) + self.assertEqual( + pymongo_span.attributes["net.peer.name"], MONGODB_HOST + ) + self.assertEqual( + pymongo_span.attributes["net.peer.port"], MONGODB_PORT + ) + + def test_insert(self): + """Should create a child span for insert + """ + with self._tracer.start_as_current_span("rootSpan"): + self._collection.insert_one( + {"name": "testName", "value": "testValue"} + ) + self.validate_spans() + + def test_update(self): + """Should create a child span for update + """ + with self._tracer.start_as_current_span("rootSpan"): + self._collection.update_one( + {"name": "testName"}, {"$set": {"value": "someOtherValue"}} + ) + self.validate_spans() + + def test_find(self): + """Should create a child span for find + """ + with self._tracer.start_as_current_span("rootSpan"): + self._collection.find_one() + self.validate_spans() + + def test_delete(self): + """Should create a child span for delete + """ + with self._tracer.start_as_current_span("rootSpan"): + self._collection.delete_one({"name": "testName"}) + self.validate_spans() diff --git a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py index 4bf0cc8c681..6646ff44a3e 100644 --- a/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py +++ b/ext/opentelemetry-ext-flask/src/opentelemetry/ext/flask/__init__.py @@ 
-12,30 +12,21 @@ logger = logging.getLogger(__name__) -_ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key" -_ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key" -_ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key" - def instrument_app(flask): """Makes the passed-in Flask object traced by OpenTelemetry. You must not call this function multiple times on the same Flask object. """ - wsgi = flask.wsgi_app + tracer = trace.tracer_source().get_tracer(__name__, __version__) def wrapped_app(environ, start_response): - # We want to measure the time for route matching, etc. - # In theory, we could start the span here and use update_name later - # but that API is "highly discouraged" so we better avoid it. - environ[_ENVIRON_STARTTIME_KEY] = time_ns() - def _start_response(status, response_headers, *args, **kwargs): - span = flask_request.environ.get(_ENVIRON_SPAN_KEY) + span = tracer.get_current_span() if span: otel_wsgi.add_response_attributes( - span, status, response_headers + span.status, response_headers ) else: logger.warning( @@ -44,54 +35,53 @@ def _start_response(status, response_headers, *args, **kwargs): ) return start_response(status, response_headers, *args, **kwargs) - return wsgi(environ, _start_response) - - flask.wsgi_app = wrapped_app - - flask.before_request(_before_flask_request) - flask.teardown_request(_teardown_flask_request) - - -def _before_flask_request(): - environ = flask_request.environ - span_name = flask_request.endpoint or otel_wsgi.get_default_span_name( - environ - ) - parent_span = propagators.extract( - otel_wsgi.get_header_from_environ, environ - ) + try: + iterable = wsgi(environ, _start_response) + for yielded in iterable: + yield yielded + except Exception as error: # noqa + # TODO Set span status (cf. 
https://github.com/open-telemetry/opentelemetry-python/issues/292) + span = tracer.get_current_span() + if span: + span.set_status( + trace.status.Status( + trace.status.StatusCanonicalCode.UNKNOWN, + description="{}: {}".format( + type(error).__name__, error + ), + ) + ) + raise + finally: + close = getattr(iterable, "close", None) + if close: + close() + span = tracer.get_current_span() + if span: + span.end() - tracer = trace.tracer_source().get_tracer(__name__, __version__) + def _before_flask_request(): + environ = flask_request.environ + span_name = flask_request.endpoint or otel_wsgi.get_default_span_name( + environ + ) + parent_span = propagators.extract( + otel_wsgi.get_header_from_environ, environ + ) - attributes = otel_wsgi.collect_request_attributes(environ) - if flask_request.url_rule: - # For 404 that result from no route found, etc, we don't have a url_rule. - attributes["http.route"] = flask_request.url_rule.rule - span = tracer.start_span( - span_name, - parent_span, - kind=trace.SpanKind.SERVER, - attributes=attributes, - start_time=environ.get(_ENVIRON_STARTTIME_KEY), - ) - activation = tracer.use_span(span, end_on_exit=True) - activation.__enter__() - environ[_ENVIRON_ACTIVATION_KEY] = activation - environ[_ENVIRON_SPAN_KEY] = span + span = tracer.start_span( + span_name, + parent_span, + kind=trace.SpanKind.SERVER, + attributes=otel_wsgi.collect_request_attributes(environ), + ) + if flask_request.url_rule: + # For 404 that result from no route found, etc, we don't have a url_rule. 
+ span.set_attribute("http.route", flask_request.url_rule.rule) -def _teardown_flask_request(exc): - activation = flask_request.environ.get(_ENVIRON_ACTIVATION_KEY) - if not activation: - logger.warning( - "Flask environ's OpenTelemetry activation missing at _teardown_flask_request(%s)", - exc, - ) - return + activation = tracer.use_span(span, end_on_exit=True) + activation.__enter__() - if exc is None: - activation.__exit__(None, None, None) - else: - activation.__exit__( - type(exc), exc, getattr(exc, "__traceback__", None) - ) + flask.wsgi_app = wrapped_app + flask.before_request(_before_flask_request) diff --git a/ext/opentelemetry-ext-flask/tests/test_flask_integration.py b/ext/opentelemetry-ext-flask/tests/test_flask_integration.py index 9d2f2560118..4906760d19d 100644 --- a/ext/opentelemetry-ext-flask/tests/test_flask_integration.py +++ b/ext/opentelemetry-ext-flask/tests/test_flask_integration.py @@ -14,7 +14,7 @@ import unittest -from flask import Flask, request +from flask import Flask, copy_current_request_context, request from werkzeug.test import Client from werkzeug.wrappers import BaseResponse @@ -76,6 +76,35 @@ def assert_environ(): self.client.get("/assert_environ") self.assertEqual(nonstring_keys, set()) + def test_copy_current_request(self): + """ + Regression test to verify copy_current_request also sets + spans correctly. 
+ """ + + def copy_current_request(): + @copy_current_request_context + def nest(): + return "hello" + + return nest() + + self.app.route("/copy_current_request")(copy_current_request) + self.client.get("/copy_current_request") + span_list = self.memory_exporter.get_finished_spans() + self.assertEqual(len(span_list), 1) + self.assertEqual(span_list[0].name, "copy_current_request") + self.assertEqual(span_list[0].kind, trace_api.SpanKind.SERVER) + self.assertEqual( + span_list[0].attributes, + expected_attributes( + { + "http.target": "/copy_current_request", + "http.route": "/copy_current_request", + } + ), + ) + def test_simple(self): expected_attrs = expected_attributes( { diff --git a/ext/opentelemetry-ext-http-requests/src/opentelemetry/ext/http_requests/__init__.py b/ext/opentelemetry-ext-http-requests/src/opentelemetry/ext/http_requests/__init__.py index 4f5a18cf9ea..a557e6fc453 100644 --- a/ext/opentelemetry-ext-http-requests/src/opentelemetry/ext/http_requests/__init__.py +++ b/ext/opentelemetry-ext-http-requests/src/opentelemetry/ext/http_requests/__init__.py @@ -22,8 +22,7 @@ from requests.sessions import Session -from opentelemetry import propagators -from opentelemetry.context import Context +from opentelemetry import context, propagators from opentelemetry.ext.http_requests.version import __version__ from opentelemetry.trace import SpanKind @@ -54,7 +53,7 @@ def enable(tracer_source): @functools.wraps(wrapped) def instrumented_request(self, method, url, *args, **kwargs): - if Context.suppress_instrumentation: + if context.get_value("suppress_instrumentation"): return wrapped(self, method, url, *args, **kwargs) # See diff --git a/ext/opentelemetry-ext-jaeger/CHANGELOG.md b/ext/opentelemetry-ext-jaeger/CHANGELOG.md index 05ffbffe0f6..6cdbefa823a 100644 --- a/ext/opentelemetry-ext-jaeger/CHANGELOG.md +++ b/ext/opentelemetry-ext-jaeger/CHANGELOG.md @@ -2,6 +2,9 @@ ## Unreleased +- Export span status 
([#367](https://github.com/open-telemetry/opentelemetry-python/pull/367)) +- Export span kind ([#387](https://github.com/open-telemetry/opentelemetry-python/pull/387)) + ## 0.3a0 Released 2019-12-11 diff --git a/ext/opentelemetry-ext-jaeger/src/opentelemetry/ext/jaeger/__init__.py b/ext/opentelemetry-ext-jaeger/src/opentelemetry/ext/jaeger/__init__.py index e9d5c5884d0..a90313d9131 100644 --- a/ext/opentelemetry-ext-jaeger/src/opentelemetry/ext/jaeger/__init__.py +++ b/ext/opentelemetry-ext-jaeger/src/opentelemetry/ext/jaeger/__init__.py @@ -26,6 +26,7 @@ from opentelemetry.ext.jaeger.gen.agent import Agent as agent from opentelemetry.ext.jaeger.gen.jaeger import Collector as jaeger from opentelemetry.sdk.trace.export import Span, SpanExporter, SpanExportResult +from opentelemetry.trace.status import StatusCanonicalCode DEFAULT_AGENT_HOST_NAME = "localhost" DEFAULT_AGENT_PORT = 6831 @@ -145,6 +146,8 @@ def _translate_to_jaeger(spans: Span): start_time_us = _nsec_to_usec_round(span.start_time) duration_us = _nsec_to_usec_round(span.end_time - span.start_time) + status = span.status + parent_id = 0 if isinstance(span.parent, trace_api.Span): parent_id = span.parent.get_context().span_id @@ -153,8 +156,17 @@ def _translate_to_jaeger(spans: Span): tags = _extract_tags(span.attributes) - # TODO: status is missing: - # https://github.com/open-telemetry/opentelemetry-python/issues/98 + tags.extend( + [ + _get_long_tag("status.code", status.canonical_code.value), + _get_string_tag("status.message", status.description), + _get_string_tag("span.kind", span.kind.name), + ] + ) + + # Ensure that if Status.Code is not OK, that we set the "error" tag on the Jaeger span. 
+ if status.canonical_code is not StatusCanonicalCode.OK: + tags.append(_get_bool_tag("error", True)) refs = _extract_refs_from_span(span) logs = _extract_logs_from_span(span) @@ -222,9 +234,7 @@ def _extract_logs_from_span(span): logs = [] for event in span.events: - fields = [] - if event.attributes is not None: - fields = _extract_tags(event.attributes) + fields = _extract_tags(event.attributes) fields.append( jaeger.Tag( @@ -241,7 +251,7 @@ def _extract_logs_from_span(span): def _extract_tags(attr): if not attr: - return None + return [] tags = [] for attribute_key, attribute_value in attr.items(): tag = _convert_attribute_to_tag(attribute_key, attribute_value) @@ -265,6 +275,18 @@ def _convert_attribute_to_tag(key, attr): return None +def _get_long_tag(key, val): + return jaeger.Tag(key=key, vLong=val, vType=jaeger.TagType.LONG) + + +def _get_string_tag(key, val): + return jaeger.Tag(key=key, vStr=val, vType=jaeger.TagType.STRING) + + +def _get_bool_tag(key, val): + return jaeger.Tag(key=key, vBool=val, vType=jaeger.TagType.BOOL) + + class AgentClientUDP: """Implement a UDP client to agent. 
diff --git a/ext/opentelemetry-ext-jaeger/tests/test_jaeger_exporter.py b/ext/opentelemetry-ext-jaeger/tests/test_jaeger_exporter.py index f8ead96ef06..08c5a4adeda 100644 --- a/ext/opentelemetry-ext-jaeger/tests/test_jaeger_exporter.py +++ b/ext/opentelemetry-ext-jaeger/tests/test_jaeger_exporter.py @@ -22,6 +22,7 @@ from opentelemetry import trace as trace_api from opentelemetry.ext.jaeger.gen.jaeger import ttypes as jaeger from opentelemetry.sdk import trace +from opentelemetry.trace.status import Status, StatusCanonicalCode class TestJaegerSpanExporter(unittest.TestCase): @@ -155,6 +156,22 @@ def test_translate_to_jaeger(self): context=other_context, attributes=link_attributes ) + default_tags = [ + jaeger.Tag( + key="status.code", + vType=jaeger.TagType.LONG, + vLong=StatusCanonicalCode.OK.value, + ), + jaeger.Tag( + key="status.message", vType=jaeger.TagType.STRING, vStr=None + ), + jaeger.Tag( + key="span.kind", + vType=jaeger.TagType.STRING, + vStr=trace_api.SpanKind.INTERNAL.name, + ), + ] + otel_spans = [ trace.Span( name=span_names[0], @@ -162,6 +179,7 @@ def test_translate_to_jaeger(self): parent=parent_context, events=(event,), links=(link,), + kind=trace_api.SpanKind.CLIENT, ), trace.Span( name=span_names[1], context=parent_context, parent=None @@ -174,6 +192,9 @@ def test_translate_to_jaeger(self): otel_spans[0].set_attribute("key_bool", False) otel_spans[0].set_attribute("key_string", "hello_world") otel_spans[0].set_attribute("key_float", 111.22) + otel_spans[0].set_status( + Status(StatusCanonicalCode.UNKNOWN, "Example description") + ) otel_spans[0].end(end_time=end_times[0]) otel_spans[1].start(start_time=start_times[1]) @@ -209,6 +230,24 @@ def test_translate_to_jaeger(self): vType=jaeger.TagType.DOUBLE, vDouble=111.22, ), + jaeger.Tag( + key="status.code", + vType=jaeger.TagType.LONG, + vLong=StatusCanonicalCode.UNKNOWN.value, + ), + jaeger.Tag( + key="status.message", + vType=jaeger.TagType.STRING, + vStr="Example description", + ), + 
jaeger.Tag( + key="span.kind", + vType=jaeger.TagType.STRING, + vStr=trace_api.SpanKind.CLIENT.name, + ), + jaeger.Tag( + key="error", vType=jaeger.TagType.BOOL, vBool=True + ), ], references=[ jaeger.SpanRef( @@ -255,6 +294,7 @@ def test_translate_to_jaeger(self): startTime=start_times[1] // 10 ** 3, duration=durations[1] // 10 ** 3, flags=0, + tags=default_tags, ), jaeger.Span( operationName=span_names[2], @@ -265,6 +305,7 @@ def test_translate_to_jaeger(self): startTime=start_times[2] // 10 ** 3, duration=durations[2] // 10 ** 3, flags=0, + tags=default_tags, ), ] diff --git a/ext/opentelemetry-ext-opentracing-shim/src/opentelemetry/ext/opentracing_shim/__init__.py b/ext/opentelemetry-ext-opentracing-shim/src/opentelemetry/ext/opentracing_shim/__init__.py index 7c7640017bf..b7753754db0 100644 --- a/ext/opentelemetry-ext-opentracing-shim/src/opentelemetry/ext/opentracing_shim/__init__.py +++ b/ext/opentelemetry-ext-opentracing-shim/src/opentelemetry/ext/opentracing_shim/__init__.py @@ -80,6 +80,9 @@ https://docs.python.org/3/tutorial/floatingpoint.html """ +# TODO: make pylint use 3p opentracing module for type inference +# pylint:disable=no-member + import logging import opentracing @@ -89,6 +92,7 @@ from opentelemetry import propagators from opentelemetry.ext.opentracing_shim import util from opentelemetry.ext.opentracing_shim.version import __version__ +from opentelemetry.trace import DefaultSpan logger = logging.getLogger(__name__) @@ -101,10 +105,10 @@ def create_tracer(otel_tracer_source): :class:`opentracing.Tracer` using OpenTelemetry under the hood. Args: - otel_tracer_source: A :class:`opentelemetry.trace.TracerSource` to be used for - constructing the :class:`TracerShim`. A tracer from this source will be used - to perform the actual tracing when user code is instrumented using - the OpenTracing API. + otel_tracer_source: A :class:`opentelemetry.trace.TracerSource` to be + used for constructing the :class:`TracerShim`. 
A tracer from this + source will be used to perform the actual tracing when user code is + instrumented using the OpenTracing API. Returns: The created :class:`TracerShim`. @@ -265,7 +269,7 @@ def log(self, **kwargs): def log_event(self, event, payload=None): super().log_event(event, payload=payload) - def set_baggage_item(self, key, value): + def set_baggage_item(self, key, value): # pylint:disable=unused-argument """Implements the ``set_baggage_item()`` method from the base class. Warning: @@ -278,7 +282,7 @@ def set_baggage_item(self, key, value): ) # TODO: Implement. - def get_baggage_item(self, key): + def get_baggage_item(self, key): # pylint:disable=unused-argument """Implements the ``get_baggage_item()`` method from the base class. Warning: @@ -667,12 +671,16 @@ def inject(self, span_context, format, carrier): # uses the configured propagators in opentelemetry.propagators. # TODO: Support Format.BINARY once it is supported in # opentelemetry-python. + if format not in self._supported_formats: raise opentracing.UnsupportedFormatException propagator = propagators.get_global_httptextformat() + propagator.inject( - span_context.unwrap(), type(carrier).__setitem__, carrier + DefaultSpan(span_context.unwrap()), + type(carrier).__setitem__, + carrier, ) def extract(self, format, carrier): diff --git a/ext/opentelemetry-ext-opentracing-shim/tests/test_shim.py b/ext/opentelemetry-ext-opentracing-shim/tests/test_shim.py index d42098dce7c..eacfc639b37 100644 --- a/ext/opentelemetry-ext-opentracing-shim/tests/test_shim.py +++ b/ext/opentelemetry-ext-opentracing-shim/tests/test_shim.py @@ -12,8 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# TODO: make pylint use 3p opentracing module for type inference +# pylint:disable=no-member + import time -import unittest +from unittest import TestCase import opentracing @@ -24,7 +27,7 @@ from opentelemetry.sdk.trace import TracerSource -class TestShim(unittest.TestCase): +class TestShim(TestCase): # pylint: disable=too-many-public-methods def setUp(self): @@ -551,6 +554,10 @@ def extract(cls, get_from_carrier, carrier): ) @classmethod - def inject(cls, context, set_in_carrier, carrier): - set_in_carrier(carrier, cls.TRACE_ID_KEY, str(context.trace_id)) - set_in_carrier(carrier, cls.SPAN_ID_KEY, str(context.span_id)) + def inject(cls, span, set_in_carrier, carrier): + set_in_carrier( + carrier, cls.TRACE_ID_KEY, str(span.get_context().trace_id) + ) + set_in_carrier( + carrier, cls.SPAN_ID_KEY, str(span.get_context().span_id) + ) diff --git a/ext/opentelemetry-ext-psycopg2/README.rst b/ext/opentelemetry-ext-psycopg2/README.rst new file mode 100644 index 00000000000..d7599492ac4 --- /dev/null +++ b/ext/opentelemetry-ext-psycopg2/README.rst @@ -0,0 +1,29 @@ +OpenTelemetry Psycopg integration +================================= + +The integration with PostgreSQL supports the `Psycopg`_ library and is specified +to ``trace_integration`` using ``'PostgreSQL'``. + +.. Psycopg: http://initd.org/psycopg/ + +Usage +----- + +.. 
code:: python + import psycopg2 + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerSource + from opentelemetry.trace.ext.psycopg2 import trace_integration + + trace.set_preferred_tracer_source_implementation(lambda T: TracerSource()) + tracer = trace.tracer_source().get_tracer(__name__) + trace_integration(tracer) + cnx = psycopg2.connect(database='Database') + cursor = cnx.cursor() + cursor.execute("INSERT INTO test (testField) VALUES (123)") + cursor.close() + cnx.close() + +References +---------- +* `OpenTelemetry Project <https://opentelemetry.io/>`_ \ No newline at end of file diff --git a/ext/opentelemetry-ext-psycopg2/setup.cfg b/ext/opentelemetry-ext-psycopg2/setup.cfg new file mode 100644 index 00000000000..f26c5918ebf --- /dev/null +++ b/ext/opentelemetry-ext-psycopg2/setup.cfg @@ -0,0 +1,47 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +[metadata] +name = opentelemetry-ext-psycopg2 +description = OpenTelemetry psycopg2 integration +long_description = file: README.rst +long_description_content_type = text/x-rst +author = OpenTelemetry Authors +author_email = cncf-opentelemetry-contributors@lists.cncf.io +url = https://github.com/open-telemetry/opentelemetry-python/ext/opentelemetry-ext-psycopg2 +platforms = any +license = Apache-2.0 +classifiers = + Development Status :: 3 - Alpha + Intended Audience :: Developers + License :: OSI Approved :: Apache Software License + Programming Language :: Python + Programming Language :: Python :: 3 + Programming Language :: Python :: 3.4 + Programming Language :: Python :: 3.5 + Programming Language :: Python :: 3.6 + Programming Language :: Python :: 3.7 + +[options] +python_requires = >=3.4 +package_dir= + =src +packages=find_namespace: +install_requires = + opentelemetry-api >= 0.4.dev0 + psycopg2-binary >= 2.7.3.1 + wrapt >= 1.0.0, < 2.0.0 + +[options.packages.find] +where = src \ No newline at end of file diff --git a/ext/opentelemetry-ext-psycopg2/setup.py b/ext/opentelemetry-ext-psycopg2/setup.py new file mode 100644 index 00000000000..a84391e6dd4 --- /dev/null +++ b/ext/opentelemetry-ext-psycopg2/setup.py @@ -0,0 +1,26 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import os + +import setuptools + +BASE_DIR = os.path.dirname(__file__) +VERSION_FILENAME = os.path.join( + BASE_DIR, "src", "opentelemetry", "ext", "psycopg2", "version.py" +) +PACKAGE_INFO = {} +with open(VERSION_FILENAME) as f: + exec(f.read(), PACKAGE_INFO) + +setuptools.setup(version=PACKAGE_INFO["__version__"]) diff --git a/ext/opentelemetry-ext-psycopg2/src/opentelemetry/ext/psycopg2/__init__.py b/ext/opentelemetry-ext-psycopg2/src/opentelemetry/ext/psycopg2/__init__.py new file mode 100644 index 00000000000..41816884892 --- /dev/null +++ b/ext/opentelemetry-ext-psycopg2/src/opentelemetry/ext/psycopg2/__init__.py @@ -0,0 +1,96 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +The opentelemetry-ext-psycopg2 package allows tracing PostgreSQL queries made by the +Psycopg2 library. +""" + +import logging +import typing + +import psycopg2 +import wrapt +from psycopg2.sql import Composable + +from opentelemetry.ext.dbapi import DatabaseApiIntegration, TracedCursor +from opentelemetry.trace import Tracer + +logger = logging.getLogger(__name__) + +DATABASE_COMPONENT = "postgresql" +DATABASE_TYPE = "sql" + + +def trace_integration(tracer): + """Integrate with PostgreSQL Psycopg library. 
+ Psycopg: http://initd.org/psycopg/ + """ + + connection_attributes = { + "database": "info.dbname", + "port": "info.port", + "host": "info.host", + "user": "info.user", + } + db_integration = DatabaseApiIntegration( + tracer, + DATABASE_COMPONENT, + database_type=DATABASE_TYPE, + connection_attributes=connection_attributes, + ) + + # pylint: disable=unused-argument + def wrap_connect( + connect_func: typing.Callable[..., any], + instance: typing.Any, + args: typing.Tuple[any, any], + kwargs: typing.Dict[any, any], + ): + connection = connect_func(*args, **kwargs) + db_integration.get_connection_attributes(connection) + connection.cursor_factory = PsycopgTraceCursor + return connection + + try: + wrapt.wrap_function_wrapper(psycopg2, "connect", wrap_connect) + except Exception as ex: # pylint: disable=broad-except + logger.warning("Failed to integrate with pyscopg2. %s", str(ex)) + + class PsycopgTraceCursor(psycopg2.extensions.cursor): + def __init__(self, *args, **kwargs): + self._traced_cursor = TracedCursor(db_integration) + super(PsycopgTraceCursor, self).__init__(*args, **kwargs) + + # pylint: disable=redefined-builtin + def execute(self, query, vars=None): + if isinstance(query, Composable): + query = query.as_string(self) + return self._traced_cursor.traced_execution( + super(PsycopgTraceCursor, self).execute, query, vars + ) + + # pylint: disable=redefined-builtin + def executemany(self, query, vars): + if isinstance(query, Composable): + query = query.as_string(self) + return self._traced_cursor.traced_execution( + super(PsycopgTraceCursor, self).executemany, query, vars + ) + + # pylint: disable=redefined-builtin + def callproc(self, procname, vars=None): + return self._traced_cursor.traced_execution( + super(PsycopgTraceCursor, self).callproc, procname, vars + ) diff --git a/ext/opentelemetry-ext-psycopg2/src/opentelemetry/ext/psycopg2/version.py b/ext/opentelemetry-ext-psycopg2/src/opentelemetry/ext/psycopg2/version.py new file mode 100644 index 
00000000000..6b39cd19b59 --- /dev/null +++ b/ext/opentelemetry-ext-psycopg2/src/opentelemetry/ext/psycopg2/version.py @@ -0,0 +1,15 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__version__ = "0.4.dev0" diff --git a/ext/opentelemetry-ext-psycopg2/tests/__init__.py b/ext/opentelemetry-ext-psycopg2/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/ext/opentelemetry-ext-psycopg2/tests/test_psycopg2_integration.py b/ext/opentelemetry-ext-psycopg2/tests/test_psycopg2_integration.py new file mode 100644 index 00000000000..56ab3a8aae7 --- /dev/null +++ b/ext/opentelemetry-ext-psycopg2/tests/test_psycopg2_integration.py @@ -0,0 +1,30 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +from unittest import mock + +import psycopg2 + +from opentelemetry import trace as trace_api +from opentelemetry.ext.psycopg2 import trace_integration + + +class TestPostgresqlIntegration(unittest.TestCase): + def test_trace_integration(self): + tracer = trace_api.DefaultTracer() + with mock.patch("psycopg2.connect"): + trace_integration(tracer) + cnx = psycopg2.connect(database="test") + self.assertIsNotNone(cnx.cursor_factory) diff --git a/ext/opentelemetry-ext-pymongo/tests/test_pymongo_integration.py b/ext/opentelemetry-ext-pymongo/tests/test_pymongo.py similarity index 99% rename from ext/opentelemetry-ext-pymongo/tests/test_pymongo_integration.py rename to ext/opentelemetry-ext-pymongo/tests/test_pymongo.py index 6c99e09e711..0889d9d994b 100644 --- a/ext/opentelemetry-ext-pymongo/tests/test_pymongo_integration.py +++ b/ext/opentelemetry-ext-pymongo/tests/test_pymongo.py @@ -20,7 +20,7 @@ from opentelemetry.util import time_ns -class TestPymongoIntegration(unittest.TestCase): +class TestPymongo(unittest.TestCase): def test_trace_integration(self): mock_register = mock.Mock() patch = mock.patch( diff --git a/ext/opentelemetry-ext-zipkin/src/opentelemetry/ext/zipkin/__init__.py b/ext/opentelemetry-ext-zipkin/src/opentelemetry/ext/zipkin/__init__.py index e0b5791d1e1..fec4da8c3ed 100644 --- a/ext/opentelemetry-ext-zipkin/src/opentelemetry/ext/zipkin/__init__.py +++ b/ext/opentelemetry-ext-zipkin/src/opentelemetry/ext/zipkin/__init__.py @@ -101,10 +101,7 @@ def export(self, spans: Sequence[Span]) -> SpanExportResult: def _translate_to_zipkin(self, spans: Sequence[Span]): - local_endpoint = { - "serviceName": self.service_name, - "port": self.port, - } + local_endpoint = {"serviceName": self.service_name, "port": self.port} if self.ipv4 is not None: local_endpoint["ipv4"] = self.ipv4 diff --git a/ext/opentelemetry-ext-zipkin/tests/test_zipkin_exporter.py b/ext/opentelemetry-ext-zipkin/tests/test_zipkin_exporter.py index e2bdb413052..467bc610bd8 
100644 --- a/ext/opentelemetry-ext-zipkin/tests/test_zipkin_exporter.py +++ b/ext/opentelemetry-ext-zipkin/tests/test_zipkin_exporter.py @@ -114,9 +114,7 @@ def test_export(self): ) span_context = trace_api.SpanContext( - trace_id, - span_id, - trace_options=TraceOptions(TraceOptions.SAMPLED), + trace_id, span_id, trace_options=TraceOptions(TraceOptions.SAMPLED) ) parent_context = trace_api.SpanContext(trace_id, parent_id) other_context = trace_api.SpanContext(trace_id, other_id) @@ -168,10 +166,7 @@ def test_export(self): otel_spans[2].end(end_time=end_times[2]) service_name = "test-service" - local_endpoint = { - "serviceName": service_name, - "port": 9411, - } + local_endpoint = {"serviceName": service_name, "port": 9411} exporter = ZipkinSpanExporter(service_name) expected = [ diff --git a/opentelemetry-api/setup.py b/opentelemetry-api/setup.py index ee8adf26aeb..fad86f171b8 100644 --- a/opentelemetry-api/setup.py +++ b/opentelemetry-api/setup.py @@ -56,4 +56,11 @@ "/tree/master/opentelemetry-api" ), zip_safe=False, + entry_points={ + "opentelemetry_context": [ + "default_context = " + "opentelemetry.context.default_context:" + "DefaultRuntimeContext", + ] + }, ) diff --git a/opentelemetry-api/src/opentelemetry/context/__init__.py b/opentelemetry-api/src/opentelemetry/context/__init__.py index 43a7722f885..63de570abc2 100644 --- a/opentelemetry-api/src/opentelemetry/context/__init__.py +++ b/opentelemetry-api/src/opentelemetry/context/__init__.py @@ -12,141 +12,119 @@ # See the License for the specific language governing permissions and # limitations under the License. - -""" -The OpenTelemetry context module provides abstraction layer on top of -thread-local storage and contextvars. The long term direction is to switch to -contextvars provided by the Python runtime library. 
- -A global object ``Context`` is provided to access all the context related -functionalities:: - - >>> from opentelemetry.context import Context - >>> Context.foo = 1 - >>> Context.foo = 2 - >>> Context.foo - 2 - -When explicit thread is used, a helper function -``Context.with_current_context`` can be used to carry the context across -threads:: - - from threading import Thread - from opentelemetry.context import Context - - def work(name): - print('Entering worker:', Context) - Context.operation_id = name - print('Exiting worker:', Context) - - if __name__ == '__main__': - print('Main thread:', Context) - Context.operation_id = 'main' - - print('Main thread:', Context) - - # by default context is not propagated to worker thread - thread = Thread(target=work, args=('foo',)) - thread.start() - thread.join() - - print('Main thread:', Context) - - # user can propagate context explicitly - thread = Thread( - target=Context.with_current_context(work), - args=('bar',), - ) - thread.start() - thread.join() - - print('Main thread:', Context) - -Here goes another example using thread pool:: - - import time - import threading - - from multiprocessing.dummy import Pool as ThreadPool - from opentelemetry.context import Context - - _console_lock = threading.Lock() - - def println(msg): - with _console_lock: - print(msg) - - def work(name): - println('Entering worker[{}]: {}'.format(name, Context)) - Context.operation_id = name - time.sleep(0.01) - println('Exiting worker[{}]: {}'.format(name, Context)) - - if __name__ == "__main__": - println('Main thread: {}'.format(Context)) - Context.operation_id = 'main' - pool = ThreadPool(2) # create a thread pool with 2 threads - pool.map(Context.with_current_context(work), [ - 'bear', - 'cat', - 'dog', - 'horse', - 'rabbit', - ]) - pool.close() - pool.join() - println('Main thread: {}'.format(Context)) - -Here goes a simple demo of how async could work in Python 3.7+:: - - import asyncio - - from opentelemetry.context import Context - - 
class Span(object): - def __init__(self, name): - self.name = name - self.parent = Context.current_span - - def __repr__(self): - return ('{}(name={}, parent={})' - .format( - type(self).__name__, - self.name, - self.parent, - )) - - async def __aenter__(self): - Context.current_span = self - - async def __aexit__(self, exc_type, exc, tb): - Context.current_span = self.parent - - async def main(): - print(Context) - async with Span('foo'): - print(Context) - await asyncio.sleep(0.1) - async with Span('bar'): - print(Context) - await asyncio.sleep(0.1) - print(Context) - await asyncio.sleep(0.1) - print(Context) - - if __name__ == '__main__': - asyncio.run(main()) -""" - -from .base_context import BaseRuntimeContext - -__all__ = ["Context"] - -try: - from .async_context import AsyncRuntimeContext - - Context = AsyncRuntimeContext() # type: BaseRuntimeContext -except ImportError: - from .thread_local_context import ThreadLocalRuntimeContext - - Context = ThreadLocalRuntimeContext() +import logging +import typing +from os import environ + +from pkg_resources import iter_entry_points + +from opentelemetry.context.context import Context, RuntimeContext + +logger = logging.getLogger(__name__) +_RUNTIME_CONTEXT = None # type: typing.Optional[RuntimeContext] + + +def get_value(key: str, context: typing.Optional[Context] = None) -> "object": + """To access the local state of a concern, the RuntimeContext API + provides a function which takes a context and a key as input, + and returns a value. + + Args: + key: The key of the value to retrieve. + context: The context from which to retrieve the value, if None, the current context is used. 
+ """ + return context.get(key) if context is not None else get_current().get(key) + + +def set_value( + key: str, value: "object", context: typing.Optional[Context] = None +) -> Context: + """To record the local state of a cross-cutting concern, the + RuntimeContext API provides a function which takes a context, a + key, and a value as input, and returns an updated context + which contains the new value. + + Args: + key: The key of the entry to set + value: The value of the entry to set + context: The context to copy, if None, the current context is used + """ + if context is None: + context = get_current() + new_values = context.copy() + new_values[key] = value + return Context(new_values) + + +def remove_value( + key: str, context: typing.Optional[Context] = None +) -> Context: + """To remove a value, this method returns a new context with the key + cleared. Note that the removed value still remains present in the old + context. + + Args: + key: The key of the entry to remove + context: The context to copy, if None, the current context is used + """ + if context is None: + context = get_current() + new_values = context.copy() + new_values.pop(key, None) + return Context(new_values) + + +def get_current() -> Context: + """To access the context associated with program execution, + the RuntimeContext API provides a function which takes no arguments + and returns a RuntimeContext. 
+ """ + + global _RUNTIME_CONTEXT # pylint: disable=global-statement + if _RUNTIME_CONTEXT is None: + # FIXME use a better implementation of a configuration manager to avoid having + # to get configuration values straight from environment variables + + configured_context = environ.get( + "OPENTELEMETRY_CONTEXT", "default_context" + ) # type: str + try: + _RUNTIME_CONTEXT = next( + iter_entry_points("opentelemetry_context", configured_context) + ).load()() + except Exception: # pylint: disable=broad-except + logger.error("Failed to load context: %s", configured_context) + + return _RUNTIME_CONTEXT.get_current() # type:ignore + + +def set_current(context: Context) -> Context: + """To associate a context with program execution, the Context + API provides a function which takes a Context. + + Args: + context: The context to use as current. + """ + old_context = get_current() + _RUNTIME_CONTEXT.set_current(context) # type:ignore + return old_context + + +def with_current_context( + func: typing.Callable[..., "object"] +) -> typing.Callable[..., "object"]: + """Capture the current context and apply it to the provided func.""" + + caller_context = get_current() + + def call_with_current_context( + *args: "object", **kwargs: "object" + ) -> "object": + try: + backup = get_current() + set_current(caller_context) + return func(*args, **kwargs) + finally: + set_current(backup) + + return call_with_current_context diff --git a/opentelemetry-api/src/opentelemetry/context/async_context.py b/opentelemetry-api/src/opentelemetry/context/async_context.py deleted file mode 100644 index 267059fb31a..00000000000 --- a/opentelemetry-api/src/opentelemetry/context/async_context.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2019, OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -try: - from contextvars import ContextVar -except ImportError: - pass -else: - import typing # pylint: disable=unused-import - from . import base_context - - class AsyncRuntimeContext(base_context.BaseRuntimeContext): - class Slot(base_context.BaseRuntimeContext.Slot): - def __init__(self, name: str, default: object): - # pylint: disable=super-init-not-called - self.name = name - self.contextvar = ContextVar(name) # type: ContextVar[object] - self.default = base_context.wrap_callable( - default - ) # type: typing.Callable[..., object] - - def clear(self) -> None: - self.contextvar.set(self.default()) - - def get(self) -> object: - try: - return self.contextvar.get() - except LookupError: - value = self.default() - self.set(value) - return value - - def set(self, value: object) -> None: - self.contextvar.set(value) diff --git a/opentelemetry-api/src/opentelemetry/context/base_context.py b/opentelemetry-api/src/opentelemetry/context/base_context.py deleted file mode 100644 index 99d6869dd52..00000000000 --- a/opentelemetry-api/src/opentelemetry/context/base_context.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2019, OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import threading -import typing -from contextlib import contextmanager - - -def wrap_callable(target: "object") -> typing.Callable[[], object]: - if callable(target): - return target - return lambda: target - - -class BaseRuntimeContext: - class Slot: - def __init__(self, name: str, default: "object"): - raise NotImplementedError - - def clear(self) -> None: - raise NotImplementedError - - def get(self) -> "object": - raise NotImplementedError - - def set(self, value: "object") -> None: - raise NotImplementedError - - _lock = threading.Lock() - _slots = {} # type: typing.Dict[str, 'BaseRuntimeContext.Slot'] - - @classmethod - def clear(cls) -> None: - """Clear all slots to their default value.""" - keys = cls._slots.keys() - for name in keys: - slot = cls._slots[name] - slot.clear() - - @classmethod - def register_slot( - cls, name: str, default: "object" = None - ) -> "BaseRuntimeContext.Slot": - """Register a context slot with an optional default value. - - :type name: str - :param name: The name of the context slot. - - :type default: object - :param name: The default value of the slot, can be a value or lambda. - - :returns: The registered slot. 
- """ - with cls._lock: - if name not in cls._slots: - cls._slots[name] = cls.Slot(name, default) - return cls._slots[name] - - def apply(self, snapshot: typing.Dict[str, "object"]) -> None: - """Set the current context from a given snapshot dictionary""" - - for name in snapshot: - setattr(self, name, snapshot[name]) - - def snapshot(self) -> typing.Dict[str, "object"]: - """Return a dictionary of current slots by reference.""" - - keys = self._slots.keys() - return dict((n, self._slots[n].get()) for n in keys) - - def __repr__(self) -> str: - return "{}({})".format(type(self).__name__, self.snapshot()) - - def __getattr__(self, name: str) -> "object": - if name not in self._slots: - self.register_slot(name, None) - slot = self._slots[name] - return slot.get() - - def __setattr__(self, name: str, value: "object") -> None: - if name not in self._slots: - self.register_slot(name, None) - slot = self._slots[name] - slot.set(value) - - def __getitem__(self, name: str) -> "object": - return self.__getattr__(name) - - def __setitem__(self, name: str, value: "object") -> None: - self.__setattr__(name, value) - - @contextmanager # type: ignore - def use(self, **kwargs: typing.Dict[str, object]) -> typing.Iterator[None]: - snapshot = {key: self[key] for key in kwargs} - for key in kwargs: - self[key] = kwargs[key] - yield - for key in kwargs: - self[key] = snapshot[key] - - def with_current_context( - self, func: typing.Callable[..., "object"] - ) -> typing.Callable[..., "object"]: - """Capture the current context and apply it to the provided func. 
- """ - - caller_context = self.snapshot() - - def call_with_current_context( - *args: "object", **kwargs: "object" - ) -> "object": - try: - backup_context = self.snapshot() - self.apply(caller_context) - return func(*args, **kwargs) - finally: - self.apply(backup_context) - - return call_with_current_context diff --git a/opentelemetry-api/src/opentelemetry/context/context.py b/opentelemetry-api/src/opentelemetry/context/context.py new file mode 100644 index 00000000000..148312a884c --- /dev/null +++ b/opentelemetry-api/src/opentelemetry/context/context.py @@ -0,0 +1,44 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import typing +from abc import ABC, abstractmethod + + +class Context(typing.Dict[str, object]): + def __setitem__(self, key: str, value: object) -> None: + raise ValueError + + +class RuntimeContext(ABC): + """The RuntimeContext interface provides a wrapper for the different + mechanisms that are used to propagate context in Python. + Implementations can be made available via entry_points and + selected through environment variables. + """ + + @abstractmethod + def set_current(self, context: Context) -> None: + """ Sets the current `Context` object. + + Args: + context: The Context to set. + """ + + @abstractmethod + def get_current(self) -> Context: + """ Returns the current `Context` object. 
""" + + +__all__ = ["Context", "RuntimeContext"] diff --git a/opentelemetry-api/src/opentelemetry/context/default_context.py b/opentelemetry-api/src/opentelemetry/context/default_context.py new file mode 100644 index 00000000000..6c83f839d30 --- /dev/null +++ b/opentelemetry-api/src/opentelemetry/context/default_context.py @@ -0,0 +1,34 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from opentelemetry.context.context import Context, RuntimeContext + + +class DefaultRuntimeContext(RuntimeContext): + """A default implementation of the RuntimeContext interface using + a dictionary to store values. 
+ """ + + def __init__(self) -> None: + self._current_context = Context() + + def set_current(self, context: Context) -> None: + """See `opentelemetry.context.RuntimeContext.set_current`.""" + self._current_context = context + + def get_current(self) -> Context: + """See `opentelemetry.context.RuntimeContext.get_current`.""" + return self._current_context + + +__all__ = ["DefaultRuntimeContext"] diff --git a/opentelemetry-api/src/opentelemetry/context/propagation/httptextformat.py b/opentelemetry-api/src/opentelemetry/context/propagation/httptextformat.py index 9b6098a9a42..b64a298c410 100644 --- a/opentelemetry-api/src/opentelemetry/context/propagation/httptextformat.py +++ b/opentelemetry-api/src/opentelemetry/context/propagation/httptextformat.py @@ -15,7 +15,7 @@ import abc import typing -from opentelemetry.trace import SpanContext +from opentelemetry.trace import Span, SpanContext _T = typing.TypeVar("_T") @@ -95,9 +95,9 @@ def extract( @abc.abstractmethod def inject( - self, context: SpanContext, set_in_carrier: Setter[_T], carrier: _T + self, span: Span, set_in_carrier: Setter[_T], carrier: _T ) -> None: - """Inject values from a SpanContext into a carrier. + """Inject values from a Span into a carrier. inject enables the propagation of values into HTTP clients or other objects which perform an HTTP request. 
Implementations diff --git a/opentelemetry-api/src/opentelemetry/context/propagation/tracecontexthttptextformat.py b/opentelemetry-api/src/opentelemetry/context/propagation/tracecontexthttptextformat.py index 5d00632ed17..6f50f008394 100644 --- a/opentelemetry-api/src/opentelemetry/context/propagation/tracecontexthttptextformat.py +++ b/opentelemetry-api/src/opentelemetry/context/propagation/tracecontexthttptextformat.py @@ -105,10 +105,13 @@ def extract( @classmethod def inject( cls, - context: trace.SpanContext, + span: trace.Span, set_in_carrier: httptextformat.Setter[_T], carrier: _T, ) -> None: + + context = span.get_context() + if context == trace.INVALID_SPAN_CONTEXT: return traceparent_string = "00-{:032x}-{:016x}-{:02x}".format( diff --git a/opentelemetry-api/src/opentelemetry/context/thread_local_context.py b/opentelemetry-api/src/opentelemetry/context/thread_local_context.py deleted file mode 100644 index b60914f846c..00000000000 --- a/opentelemetry-api/src/opentelemetry/context/thread_local_context.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright 2019, OpenTelemetry Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import threading -import typing # pylint: disable=unused-import - -from . 
import base_context - - -class ThreadLocalRuntimeContext(base_context.BaseRuntimeContext): - class Slot(base_context.BaseRuntimeContext.Slot): - _thread_local = threading.local() - - def __init__(self, name: str, default: "object"): - # pylint: disable=super-init-not-called - self.name = name - self.default = base_context.wrap_callable( - default - ) # type: typing.Callable[..., object] - - def clear(self) -> None: - setattr(self._thread_local, self.name, self.default()) - - def get(self) -> "object": - try: - got = getattr(self._thread_local, self.name) # type: object - return got - except AttributeError: - value = self.default() - self.set(value) - return value - - def set(self, value: "object") -> None: - setattr(self._thread_local, self.name, value) diff --git a/opentelemetry-api/src/opentelemetry/distributedcontext/__init__.py b/opentelemetry-api/src/opentelemetry/distributedcontext/__init__.py index 38ef3739b90..a89d9825502 100644 --- a/opentelemetry-api/src/opentelemetry/distributedcontext/__init__.py +++ b/opentelemetry-api/src/opentelemetry/distributedcontext/__init__.py @@ -17,6 +17,9 @@ import typing from contextlib import contextmanager +from opentelemetry.context import get_value, set_current, set_value +from opentelemetry.context.context import Context + PRINTABLE = frozenset( itertools.chain( string.ascii_letters, string.digits, string.punctuation, " " @@ -100,7 +103,9 @@ def get_entry_value(self, key: EntryKey) -> typing.Optional[EntryValue]: class DistributedContextManager: - def get_current_context(self) -> typing.Optional[DistributedContext]: + def get_current_context( + self, context: typing.Optional[Context] = None + ) -> typing.Optional[DistributedContext]: """Gets the current DistributedContext. 
Returns: @@ -123,3 +128,18 @@ def use_context( """ # pylint: disable=no-self-use yield context + + +_DISTRIBUTED_CONTEXT_KEY = "DistributedContext" + + +def distributed_context_from_context( + context: typing.Optional[Context] = None, +) -> DistributedContext: + return get_value(_DISTRIBUTED_CONTEXT_KEY, context) # type: ignore + + +def with_distributed_context( + dctx: DistributedContext, context: typing.Optional[Context] = None +) -> None: + set_current(set_value(_DISTRIBUTED_CONTEXT_KEY, dctx, context=context)) diff --git a/opentelemetry-api/src/opentelemetry/metrics/__init__.py b/opentelemetry-api/src/opentelemetry/metrics/__init__.py index 947d57b976d..5045c38eed9 100644 --- a/opentelemetry-api/src/opentelemetry/metrics/__init__.py +++ b/opentelemetry-api/src/opentelemetry/metrics/__init__.py @@ -40,13 +40,34 @@ class DefaultMetricHandle: Used when no MetricHandle implementation is available. """ + def add(self, value: ValueT) -> None: + """No-op implementation of `CounterHandle` add. + + Args: + value: The value to add to the handle. + """ + + def set(self, value: ValueT) -> None: + """No-op implementation of `GaugeHandle` set. + + Args: + value: The value to set to the handle. + """ + + def record(self, value: ValueT) -> None: + """No-op implementation of `MeasureHandle` record. + + Args: + value: The value to record to the handle. + """ + class CounterHandle: def add(self, value: ValueT) -> None: """Increases the value of the handle by ``value``. Args: - value: The value to record to the handle. + value: The value to add to the handle. """ @@ -55,7 +76,7 @@ def set(self, value: ValueT) -> None: """Sets the current value of the handle to ``value``. Args: - value: The value to record to the handle. + value: The value to set to the handle. """ @@ -87,13 +108,14 @@ class DefaultLabelSet(LabelSet): """ -class Metric: +class Metric(abc.ABC): """Base class for various types of metrics. 
Metric class that inherit from this class are specialized with the type of handle that the metric holds. """ + @abc.abstractmethod def get_handle(self, label_set: LabelSet) -> "object": """Gets a handle, used for repeated-use of metrics instruments. @@ -120,6 +142,30 @@ def get_handle(self, label_set: LabelSet) -> "DefaultMetricHandle": """ return DefaultMetricHandle() + def add(self, value: ValueT, label_set: LabelSet) -> None: + """No-op implementation of `Counter` add. + + Args: + value: The value to add to the counter metric. + label_set: `LabelSet` to associate with the returned handle. + """ + + def set(self, value: ValueT, label_set: LabelSet) -> None: + """No-op implementation of `Gauge` set. + + Args: + value: The value to set the gauge metric to. + label_set: `LabelSet` to associate with the returned handle. + """ + + def record(self, value: ValueT, label_set: LabelSet) -> None: + """No-op implementation of `Measure` record. + + Args: + value: The value to record to this measure metric. + label_set: `LabelSet` to associate with the returned handle. + """ + class Counter(Metric): """A counter type metric that expresses the computation of a sum.""" @@ -128,12 +174,12 @@ def get_handle(self, label_set: LabelSet) -> "CounterHandle": """Gets a `CounterHandle`.""" return CounterHandle() - def add(self, label_set: LabelSet, value: ValueT) -> None: + def add(self, value: ValueT, label_set: LabelSet) -> None: """Increases the value of the counter by ``value``. Args: - label_set: `LabelSet` to associate with the returned handle. value: The value to add to the counter metric. + label_set: `LabelSet` to associate with the returned handle. """ @@ -150,33 +196,31 @@ def get_handle(self, label_set: LabelSet) -> "GaugeHandle": """Gets a `GaugeHandle`.""" return GaugeHandle() - def set(self, label_set: LabelSet, value: ValueT) -> None: + def set(self, value: ValueT, label_set: LabelSet) -> None: """Sets the value of the gauge to ``value``. 
Args: - label_set: `LabelSet` to associate with the returned handle. value: The value to set the gauge metric to. + label_set: `LabelSet` to associate with the returned handle. """ class Measure(Metric): """A measure type metric that represent raw stats that are recorded. - Measure metrics represent raw statistics that are recorded. By - default, measure metrics can accept both positive and negatives. - Negative inputs will be discarded when monotonic is True. + Measure metrics represent raw statistics that are recorded. """ def get_handle(self, label_set: LabelSet) -> "MeasureHandle": """Gets a `MeasureHandle` with a float value.""" return MeasureHandle() - def record(self, label_set: LabelSet, value: ValueT) -> None: + def record(self, value: ValueT, label_set: LabelSet) -> None: """Records the ``value`` to the measure. Args: - label_set: `LabelSet` to associate with the returned handle. value: The value to record to this measure metric. + label_set: `LabelSet` to associate with the returned handle. """ @@ -222,7 +266,6 @@ def create_metric( metric_type: Type[MetricT], label_keys: Sequence[str] = (), enabled: bool = True, - monotonic: bool = False, ) -> "Metric": """Creates a ``metric_kind`` metric with type ``value_type``. @@ -234,8 +277,6 @@ def create_metric( metric_type: The type of metric being created. label_keys: The keys for the labels with dynamic values. enabled: Whether to report the metric by default. - monotonic: Whether to only allow non-negative values. - Returns: A new ``metric_type`` metric with values of ``value_type``. 
""" @@ -269,7 +310,6 @@ def create_metric( metric_type: Type[MetricT], label_keys: Sequence[str] = (), enabled: bool = True, - monotonic: bool = False, ) -> "Metric": # pylint: disable=no-self-use return DefaultMetric() @@ -297,7 +337,12 @@ def meter() -> Meter: if _METER is None: # pylint:disable=protected-access - _METER = loader._load_impl(DefaultMeter, _METER_FACTORY) + try: + _METER = loader._load_impl(Meter, _METER_FACTORY) # type: ignore + except TypeError: + # if we raised an exception trying to instantiate an + # abstract class, default to no-op tracer impl + _METER = DefaultMeter() del _METER_FACTORY return _METER diff --git a/opentelemetry-api/src/opentelemetry/propagators/__init__.py b/opentelemetry-api/src/opentelemetry/propagators/__init__.py index bb75d84c3a4..3974a4cb03a 100644 --- a/opentelemetry-api/src/opentelemetry/propagators/__init__.py +++ b/opentelemetry-api/src/opentelemetry/propagators/__init__.py @@ -64,7 +64,7 @@ def inject( should know how to set header values on the carrier. """ get_global_httptextformat().inject( - tracer.get_current_span().get_context(), set_in_carrier, carrier + tracer.get_current_span(), set_in_carrier, carrier ) diff --git a/opentelemetry-api/src/opentelemetry/trace/propagation/__init__.py b/opentelemetry-api/src/opentelemetry/trace/propagation/__init__.py new file mode 100644 index 00000000000..67d8a76a53e --- /dev/null +++ b/opentelemetry-api/src/opentelemetry/trace/propagation/__init__.py @@ -0,0 +1,26 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Optional + +from opentelemetry.trace import INVALID_SPAN_CONTEXT, Span, SpanContext + +_SPAN_CONTEXT_KEY = "extracted-span-context" +_SPAN_KEY = "current-span" + + +def get_span_key(tracer_source_id: Optional[str] = None) -> str: + key = _SPAN_KEY + if tracer_source_id is not None: + key = "{}-{}".format(key, tracer_source_id) + return key diff --git a/opentelemetry-api/src/opentelemetry/trace/sampling.py b/opentelemetry-api/src/opentelemetry/trace/sampling.py index 2425c27c072..503c2e03eb0 100644 --- a/opentelemetry-api/src/opentelemetry/trace/sampling.py +++ b/opentelemetry-api/src/opentelemetry/trace/sampling.py @@ -82,13 +82,13 @@ def __init__(self, rate: float): self._rate = rate self._bound = self.get_bound_for_rate(self._rate) - # The sampler checks the last 8 bytes of the trace ID to decide whether to - # sample a given trace. - CHECK_BYTES = 0xFFFFFFFFFFFFFFFF + # For compatibility with 64 bit trace IDs, the sampler checks the 64 + # low-order bits of the trace ID to decide whether to sample a given trace. + TRACE_ID_LIMIT = (1 << 64) - 1 @classmethod def get_bound_for_rate(cls, rate: float) -> int: - return round(rate * (cls.CHECK_BYTES + 1)) + return round(rate * (cls.TRACE_ID_LIMIT + 1)) @property def rate(self) -> float: @@ -115,7 +115,7 @@ def should_sample( if parent_context is not None: return Decision(parent_context.trace_options.sampled) - return Decision(trace_id & self.CHECK_BYTES < self.bound) + return Decision(trace_id & self.TRACE_ID_LIMIT < self.bound) # Samplers that ignore the parent sampling decision and never/always sample. 
diff --git a/opentelemetry-api/tests/context/propagation/test_tracecontexthttptextformat.py b/opentelemetry-api/tests/context/propagation/test_tracecontexthttptextformat.py index ed952e0dbab..8f283ef8819 100644 --- a/opentelemetry-api/tests/context/propagation/test_tracecontexthttptextformat.py +++ b/opentelemetry-api/tests/context/propagation/test_tracecontexthttptextformat.py @@ -14,6 +14,7 @@ import typing import unittest +from unittest.mock import Mock from opentelemetry import trace from opentelemetry.context.propagation import tracecontexthttptextformat @@ -38,7 +39,8 @@ def test_no_traceparent_header(self): RFC 4.2.2: - If no traceparent header is received, the vendor creates a new trace-id and parent-id that represents the current request. + If no traceparent header is received, the vendor creates a new + trace-id and parent-id that represents the current request. """ output = {} # type:typing.Dict[str, typing.List[str]] span_context = FORMAT.extract(get_as_list, output) @@ -66,8 +68,10 @@ def test_headers_with_tracestate(self): span_context.trace_state, {"foo": "1", "bar": "2", "baz": "3"} ) + mock_span = Mock() + mock_span.configure_mock(**{"get_context.return_value": span_context}) output = {} # type:typing.Dict[str, str] - FORMAT.inject(span_context, dict.__setitem__, output) + FORMAT.inject(mock_span, dict.__setitem__, output) self.assertEqual(output["traceparent"], traceparent_value) for pair in ["foo=1", "bar=2", "baz=3"]: self.assertIn(pair, output["tracestate"]) @@ -81,13 +85,16 @@ def test_invalid_trace_id(self): RFC 3.2.2.3 - If the trace-id value is invalid (for example if it contains non-allowed characters or all - zeros), vendors MUST ignore the traceparent. + If the trace-id value is invalid (for example if it contains + non-allowed characters or all zeros), vendors MUST ignore the + traceparent. RFC 3.3 - If the vendor failed to parse traceparent, it MUST NOT attempt to parse tracestate. 
- Note that the opposite is not true: failure to parse tracestate MUST NOT affect the parsing of traceparent. + If the vendor failed to parse traceparent, it MUST NOT attempt to + parse tracestate. + Note that the opposite is not true: failure to parse tracestate MUST + NOT affect the parsing of traceparent. """ span_context = FORMAT.extract( get_as_list, @@ -101,19 +108,22 @@ def test_invalid_trace_id(self): self.assertEqual(span_context, trace.INVALID_SPAN_CONTEXT) def test_invalid_parent_id(self): - """If the parent id is invalid, we must ignore the full traceparent header. + """If the parent id is invalid, we must ignore the full traceparent + header. Also ignore any tracestate. RFC 3.2.2.3 - Vendors MUST ignore the traceparent when the parent-id is invalid (for example, - if it contains non-lowercase hex characters). + Vendors MUST ignore the traceparent when the parent-id is invalid (for + example, if it contains non-lowercase hex characters). RFC 3.3 - If the vendor failed to parse traceparent, it MUST NOT attempt to parse tracestate. - Note that the opposite is not true: failure to parse tracestate MUST NOT affect the parsing of traceparent. + If the vendor failed to parse traceparent, it MUST NOT attempt to parse + tracestate. + Note that the opposite is not true: failure to parse tracestate MUST + NOT affect the parsing of traceparent. """ span_context = FORMAT.extract( get_as_list, @@ -131,15 +141,19 @@ def test_no_send_empty_tracestate(self): RFC 3.3.1.1 - Empty and whitespace-only list members are allowed. Vendors MUST accept empty - tracestate headers but SHOULD avoid sending them. + Empty and whitespace-only list members are allowed. Vendors MUST accept + empty tracestate headers but SHOULD avoid sending them. 
""" output = {} # type:typing.Dict[str, str] - FORMAT.inject( - trace.SpanContext(self.TRACE_ID, self.SPAN_ID), - dict.__setitem__, - output, + mock_span = Mock() + mock_span.configure_mock( + **{ + "get_context.return_value": trace.SpanContext( + self.TRACE_ID, self.SPAN_ID + ) + } ) + FORMAT.inject(mock_span, dict.__setitem__, output) self.assertTrue("traceparent" in output) self.assertFalse("tracestate" in output) @@ -155,7 +169,8 @@ def test_format_not_supported(self): get_as_list, { "traceparent": [ - "00-12345678901234567890123456789012-1234567890123456-00-residue" + "00-12345678901234567890123456789012-" + "1234567890123456-00-residue" ], "tracestate": ["foo=1,bar=2,foo=3"], }, @@ -163,14 +178,14 @@ def test_format_not_supported(self): self.assertEqual(span_context, trace.INVALID_SPAN_CONTEXT) def test_propagate_invalid_context(self): - """Do not propagate invalid trace context. - """ + """Do not propagate invalid trace context.""" output = {} # type:typing.Dict[str, str] - FORMAT.inject(trace.INVALID_SPAN_CONTEXT, dict.__setitem__, output) + FORMAT.inject(trace.INVALID_SPAN, dict.__setitem__, output) self.assertFalse("traceparent" in output) def test_tracestate_empty_header(self): - """Test tracestate with an additional empty header (should be ignored)""" + """Test tracestate with an additional empty header (should be ignored) + """ span_context = FORMAT.extract( get_as_list, { diff --git a/opentelemetry-api/tests/context/test_context.py b/opentelemetry-api/tests/context/test_context.py new file mode 100644 index 00000000000..2536e5149be --- /dev/null +++ b/opentelemetry-api/tests/context/test_context.py @@ -0,0 +1,65 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest + +from opentelemetry import context +from opentelemetry.context.context import Context + + +def do_work() -> None: + context.set_current(context.set_value("say", "bar")) + + +class TestContext(unittest.TestCase): + def setUp(self): + context.set_current(Context()) + + def test_context(self): + self.assertIsNone(context.get_value("say")) + empty = context.get_current() + second = context.set_value("say", "foo") + self.assertEqual(context.get_value("say", context=second), "foo") + + do_work() + self.assertEqual(context.get_value("say"), "bar") + third = context.get_current() + + self.assertIsNone(context.get_value("say", context=empty)) + self.assertEqual(context.get_value("say", context=second), "foo") + self.assertEqual(context.get_value("say", context=third), "bar") + + def test_set_value(self): + first = context.set_value("a", "yyy") + second = context.set_value("a", "zzz") + third = context.set_value("a", "---", first) + self.assertEqual("yyy", context.get_value("a", context=first)) + self.assertEqual("zzz", context.get_value("a", context=second)) + self.assertEqual("---", context.get_value("a", context=third)) + self.assertEqual(None, context.get_value("a")) + + def test_context_is_immutable(self): + with self.assertRaises(ValueError): + # ensure a context + context.get_current()["test"] = "cant-change-immutable" + + def test_set_current(self): + context.set_current(context.set_value("a", "yyy")) + + old_context = context.set_current(context.set_value("a", "zzz")) + self.assertEqual("yyy", context.get_value("a", context=old_context)) 
+ self.assertEqual("zzz", context.get_value("a")) + + context.set_current(old_context) + self.assertEqual("yyy", context.get_value("a")) diff --git a/opentelemetry-api/tests/metrics/test_metrics.py b/opentelemetry-api/tests/metrics/test_metrics.py index a8959266b28..3ec0f81c718 100644 --- a/opentelemetry-api/tests/metrics/test_metrics.py +++ b/opentelemetry-api/tests/metrics/test_metrics.py @@ -13,6 +13,8 @@ # limitations under the License. import unittest +from contextlib import contextmanager +from unittest import mock from opentelemetry import metrics @@ -52,7 +54,7 @@ def test_counter(self): def test_counter_add(self): counter = metrics.Counter() label_set = metrics.LabelSet() - counter.add(label_set, 1) + counter.add(1, label_set) def test_gauge(self): gauge = metrics.Gauge() @@ -63,7 +65,7 @@ def test_gauge(self): def test_gauge_set(self): gauge = metrics.Gauge() label_set = metrics.LabelSet() - gauge.set(label_set, 1) + gauge.set(1, label_set) def test_measure(self): measure = metrics.Measure() @@ -74,7 +76,7 @@ def test_measure(self): def test_measure_record(self): measure = metrics.Measure() label_set = metrics.LabelSet() - measure.record(label_set, 1) + measure.record(1, label_set) def test_default_handle(self): metrics.DefaultMetricHandle() @@ -90,3 +92,34 @@ def test_gauge_handle(self): def test_measure_handle(self): handle = metrics.MeasureHandle() handle.record(1) + + +@contextmanager +# type: ignore +def patch_metrics_globals(meter=None, meter_factory=None): + """Mock metrics._METER and metrics._METER_FACTORY. + + This prevents previous changes to these values from affecting the code in + this scope, and prevents changes in this scope from leaking out and + affecting other tests. 
+ """ + with mock.patch("opentelemetry.metrics._METER", meter): + with mock.patch("opentelemetry.metrics._METER_FACTORY", meter_factory): + yield + + +class TestGlobals(unittest.TestCase): + def test_meter_default_factory(self): + """Check that the default meter is a DefaultMeter.""" + with patch_metrics_globals(): + meter = metrics.meter() + self.assertIsInstance(meter, metrics.DefaultMeter) + # Check that we don't create a new instance on each call + self.assertIs(meter, metrics.meter()) + + def test_meter_custom_factory(self): + """Check that we use the provided factory for custom global meters.""" + mock_meter = mock.Mock(metrics.Meter) + with patch_metrics_globals(meter_factory=lambda _: mock_meter): + meter = metrics.meter() + self.assertIs(meter, mock_meter) diff --git a/opentelemetry-api/tests/trace/test_sampling.py b/opentelemetry-api/tests/trace/test_sampling.py index b456aa91f18..f04aecef45b 100644 --- a/opentelemetry-api/tests/trace/test_sampling.py +++ b/opentelemetry-api/tests/trace/test_sampling.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import sys import unittest from opentelemetry import trace @@ -137,7 +138,7 @@ def test_probability_sampler(self): trace.SpanContext( 0xDEADBEF0, 0xDEADBEF1, trace_options=TO_DEFAULT ), - 0x8000000000000000, + 0x7FFFFFFFFFFFFFFF, 0xDEADBEEF, "span name", ).sampled @@ -147,7 +148,7 @@ def test_probability_sampler(self): trace.SpanContext( 0xDEADBEF0, 0xDEADBEF1, trace_options=TO_SAMPLED ), - 0x8000000000000001, + 0x8000000000000000, 0xDEADBEEF, "span name", ).sampled @@ -189,14 +190,13 @@ def test_probability_sampler_limits(self): sampling.ProbabilitySampler.get_bound_for_rate(2 ** -64), 0x1 ) - # Sample every trace with (last 8 bytes of) trace ID less than - # 0xffffffffffffffff. In principle this is the highest possible - # sampling rate less than 1, but we can't actually express this rate as - # a float! 
+ # Sample every trace with trace ID less than 0xffffffffffffffff. In + # principle this is the highest possible sampling rate less than 1, but + # we can't actually express this rate as a float! # # In practice, the highest possible sampling rate is: # - # round(sys.float_info.epsilon * 2 ** 64) + # 1 - sys.float_info.epsilon almost_always_on = sampling.ProbabilitySampler(1 - 2 ** -64) self.assertTrue( @@ -212,12 +212,29 @@ def test_probability_sampler_limits(self): # self.assertFalse( # almost_always_on.should_sample( # None, - # 0xffffffffffffffff, - # 0xdeadbeef, + # 0xFFFFFFFFFFFFFFFF, + # 0xDEADBEEF, # "span name", # ).sampled # ) # self.assertEqual( # sampling.ProbabilitySampler.get_bound_for_rate(1 - 2 ** -64)), - # 0xffffffffffffffff, + # 0xFFFFFFFFFFFFFFFF, # ) + + # Check that a sampler with the highest effective sampling rate < 1 + # refuses to sample traces with trace ID 0xffffffffffffffff. + almost_almost_always_on = sampling.ProbabilitySampler( + 1 - sys.float_info.epsilon + ) + self.assertFalse( + almost_almost_always_on.should_sample( + None, 0xFFFFFFFFFFFFFFFF, 0xDEADBEEF, "span name" + ).sampled + ) + # Check that the highest effective sampling rate is actually lower than + # the highest theoretical sampling rate. If this test fails the test + # above is wrong. 
+ self.assertLess( + almost_almost_always_on.bound, 0xFFFFFFFFFFFFFFFF, + ) diff --git a/opentelemetry-sdk/setup.py b/opentelemetry-sdk/setup.py index cbfb0f075d4..7e88bb3bfe5 100644 --- a/opentelemetry-sdk/setup.py +++ b/opentelemetry-sdk/setup.py @@ -44,7 +44,7 @@ include_package_data=True, long_description=open("README.rst").read(), long_description_content_type="text/x-rst", - install_requires=["opentelemetry-api==0.4.dev0"], + install_requires=["opentelemetry-api==0.4.dev0", "aiocontextvars"], extras_require={}, license="Apache-2.0", package_dir={"": "src"}, @@ -56,4 +56,14 @@ "/tree/master/opentelemetry-sdk" ), zip_safe=False, + entry_points={ + "opentelemetry_context": [ + "contextvars_context = " + "opentelemetry.sdk.context.contextvars_context:" + "ContextVarsRuntimeContext", + "threadlocal_context = " + "opentelemetry.sdk.context.threadlocal_context:" + "ThreadLocalRuntimeContext", + ] + }, ) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/context/aiocontextvarsfix.py b/opentelemetry-sdk/src/opentelemetry/sdk/context/aiocontextvarsfix.py new file mode 100644 index 00000000000..6aa17793788 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/context/aiocontextvarsfix.py @@ -0,0 +1,86 @@ +# type: ignore +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This module is a patch to allow aiocontextvars to work for older versions +# of Python 3.5. 
It is copied and pasted from: +# https://github.com/fantix/aiocontextvars/issues/88#issuecomment-522276290 + +import asyncio +import asyncio.coroutines +import asyncio.futures +import concurrent.futures + +if not hasattr(asyncio, "_get_running_loop"): + # noinspection PyCompatibility + # pylint:disable=protected-access + import asyncio.events + from threading import local as threading_local + + if not hasattr(asyncio.events, "_get_running_loop"): + + class _RunningLoop(threading_local): + _loop = None + + _running_loop = _RunningLoop() + + def _get_running_loop(): + return _running_loop._loop + + def set_running_loop(loop): # noqa: F811 + _running_loop._loop = loop + + def _get_event_loop(): + current_loop = _get_running_loop() + if current_loop is not None: + return current_loop + return asyncio.events.get_event_loop_policy().get_event_loop() + + asyncio.events.get_event_loop = _get_event_loop + asyncio.events._get_running_loop = _get_running_loop + asyncio.events._set_running_loop = set_running_loop + + asyncio._get_running_loop = asyncio.events._get_running_loop + asyncio._set_running_loop = asyncio.events._set_running_loop + +# noinspection PyUnresolvedReferences +import aiocontextvars # pylint: disable=unused-import,wrong-import-position # noqa # isort:skip + + +def _run_coroutine_threadsafe(coro, loop): + """ + Patch to create task in the same thread instead of in the callback. + This ensures that contextvars get copied. Python 3.7 copies contextvars + without this. 
+ """ + if not asyncio.coroutines.iscoroutine(coro): + raise TypeError("A coroutine object is required") + future = concurrent.futures.Future() + task = asyncio.ensure_future(coro, loop=loop) + + def callback() -> None: + try: + # noinspection PyProtectedMember,PyUnresolvedReferences + # pylint:disable=protected-access + asyncio.futures._chain_future(task, future) + except Exception as exc: + if future.set_running_or_notify_cancel(): + future.set_exception(exc) + raise + + loop.call_soon_threadsafe(callback) + return future + + +asyncio.run_coroutine_threadsafe = _run_coroutine_threadsafe diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/context/contextvars_context.py b/opentelemetry-sdk/src/opentelemetry/sdk/context/contextvars_context.py new file mode 100644 index 00000000000..0a350e26997 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/context/contextvars_context.py @@ -0,0 +1,48 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from contextvars import ContextVar +from sys import version_info + +from opentelemetry.context import Context +from opentelemetry.context.context import RuntimeContext + +if (3, 5, 3) <= version_info < (3, 7): + import aiocontextvars # type: ignore # pylint:disable=unused-import + +elif (3, 4) < version_info <= (3, 5, 2): + import opentelemetry.sdk.context.aiocontextvarsfix # pylint:disable=unused-import + + +class ContextVarsRuntimeContext(RuntimeContext): + """An implementation of the RuntimeContext interface which wraps ContextVar under + the hood. This is the preferred implementation for usage with Python 3.5+ + """ + + _CONTEXT_KEY = "current_context" + + def __init__(self) -> None: + self._current_context = ContextVar( + self._CONTEXT_KEY, default=Context() + ) + + def set_current(self, context: Context) -> None: + """See `opentelemetry.context.RuntimeContext.set_current`.""" + self._current_context.set(context) + + def get_current(self) -> Context: + """See `opentelemetry.context.RuntimeContext.get_current`.""" + return self._current_context.get() + + +__all__ = ["ContextVarsRuntimeContext"] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/context/propagation/b3_format.py b/opentelemetry-sdk/src/opentelemetry/sdk/context/propagation/b3_format.py index 7d59fddb9e5..4c9214dbccb 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/context/propagation/b3_format.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/context/propagation/b3_format.py @@ -27,6 +27,7 @@ class B3Format(HTTPTextFormat): SINGLE_HEADER_KEY = "b3" TRACE_ID_KEY = "x-b3-traceid" SPAN_ID_KEY = "x-b3-spanid" + PARENT_SPAN_ID_KEY = "x-b3-parentspanid" SAMPLED_KEY = "x-b3-sampled" FLAGS_KEY = "x-b3-flags" _SAMPLE_PROPAGATE_VALUES = set(["1", "True", "true", "d"]) @@ -55,7 +56,7 @@ def extract(cls, get_from_carrier, carrier): elif len(fields) == 3: trace_id, span_id, sampled = fields elif len(fields) == 4: - trace_id, span_id, sampled, _parent_span_id = fields + trace_id, span_id, sampled, _ = 
fields else: return trace.INVALID_SPAN_CONTEXT else: @@ -100,14 +101,22 @@ def extract(cls, get_from_carrier, carrier): ) @classmethod - def inject(cls, context, set_in_carrier, carrier): - sampled = (trace.TraceOptions.SAMPLED & context.trace_options) != 0 + def inject(cls, span, set_in_carrier, carrier): + sampled = ( + trace.TraceOptions.SAMPLED & span.context.trace_options + ) != 0 set_in_carrier( - carrier, cls.TRACE_ID_KEY, format_trace_id(context.trace_id) + carrier, cls.TRACE_ID_KEY, format_trace_id(span.context.trace_id) ) set_in_carrier( - carrier, cls.SPAN_ID_KEY, format_span_id(context.span_id) + carrier, cls.SPAN_ID_KEY, format_span_id(span.context.span_id) ) + if span.parent is not None: + set_in_carrier( + carrier, + cls.PARENT_SPAN_ID_KEY, + format_span_id(span.parent.context.span_id), + ) set_in_carrier(carrier, cls.SAMPLED_KEY, "1" if sampled else "0") diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/context/threadlocal_context.py b/opentelemetry-sdk/src/opentelemetry/sdk/context/threadlocal_context.py new file mode 100644 index 00000000000..26d4329c52f --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/context/threadlocal_context.py @@ -0,0 +1,44 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import threading + +from opentelemetry.context import Context, RuntimeContext + + +class ThreadLocalRuntimeContext(RuntimeContext): + """An implementation of the RuntimeContext interface + which uses thread-local storage under the hood. This + implementation is available for usage with Python 3.4. + """ + + _CONTEXT_KEY = "current_context" + + def __init__(self) -> None: + self._current_context = threading.local() + + def set_current(self, context: Context) -> None: + """See `opentelemetry.context.RuntimeContext.set_current`.""" + setattr(self._current_context, self._CONTEXT_KEY, context) + + def get_current(self) -> Context: + """See `opentelemetry.context.RuntimeContext.get_current`.""" + if not hasattr(self._current_context, self._CONTEXT_KEY): + setattr( + self._current_context, self._CONTEXT_KEY, Context(), + ) + return getattr(self._current_context, self._CONTEXT_KEY) + + +__all__ = ["ThreadLocalRuntimeContext"] diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/distributedcontext/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/distributedcontext/__init__.py index a20cbf89635..7a0a66a8a9a 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/distributedcontext/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/distributedcontext/__init__.py @@ -16,33 +16,27 @@ from contextlib import contextmanager from opentelemetry import distributedcontext as dctx_api -from opentelemetry.context import Context +from opentelemetry.context import Context, get_value, set_value +from opentelemetry.distributedcontext import ( + distributed_context_from_context, + with_distributed_context, +) class DistributedContextManager(dctx_api.DistributedContextManager): """See `opentelemetry.distributedcontext.DistributedContextManager` - Args: - name: The name of the context manager """ - def __init__(self, name: str = "") -> None: - if name: - slot_name = "DistributedContext.{}".format(name) - else: - slot_name = "DistributedContext" - - self._current_context = 
Context.register_slot(slot_name) - def get_current_context( - self, + self, context: typing.Optional[Context] = None ) -> typing.Optional[dctx_api.DistributedContext]: """Gets the current DistributedContext. Returns: A DistributedContext instance representing the current context. """ - return self._current_context.get() + return distributed_context_from_context(context=context) @contextmanager def use_context( @@ -58,9 +52,10 @@ def use_context( Args: context: A DistributedContext instance to make current. """ - snapshot = self._current_context.get() - self._current_context.set(context) + snapshot = distributed_context_from_context() + with_distributed_context(context) + try: yield context finally: - self._current_context.set(snapshot) + with_distributed_context(snapshot) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py index f0c3e0e6d33..4c9231582c8 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/__init__.py @@ -17,6 +17,8 @@ from typing import Dict, Sequence, Tuple, Type from opentelemetry import metrics as metrics_api +from opentelemetry.sdk.metrics.export.aggregate import Aggregator +from opentelemetry.sdk.metrics.export.batcher import Batcher, UngroupedBatcher from opentelemetry.util import time_ns logger = logging.getLogger(__name__) @@ -27,20 +29,45 @@ class LabelSet(metrics_api.LabelSet): """See `opentelemetry.metrics.LabelSet`.""" def __init__(self, labels: Dict[str, str] = None): - self.labels = labels + if labels is None: + labels = {} + # LabelSet properties used only in dictionaries for fast lookup + self._labels = tuple(labels.items()) + self._encoded = tuple(sorted(labels.items())) + + @property + def labels(self): + return self._labels + + def __hash__(self): + return hash(self._encoded) + + def __eq__(self, other): + return self._encoded == other._encoded class BaseHandle: + """The base 
handle class containing common behavior for all handles. + + Handles are responsible for operating on data for metric instruments for a + specific set of labels. + + Args: + value_type: The type of values this handle holds (int, float). + enabled: True if the originating instrument is enabled. + aggregator: The aggregator for this handle. Will handle aggregation + upon updates and checkpointing of values for exporting. + """ + def __init__( self, value_type: Type[metrics_api.ValueT], enabled: bool, - monotonic: bool, + aggregator: Aggregator, ): - self.data = value_type() self.value_type = value_type self.enabled = enabled - self.monotonic = monotonic + self.aggregator = aggregator self.last_update_timestamp = time_ns() def _validate_update(self, value: metrics_api.ValueT) -> bool: @@ -53,9 +80,15 @@ def _validate_update(self, value: metrics_api.ValueT) -> bool: return False return True + def update(self, value: metrics_api.ValueT): + self.last_update_timestamp = time_ns() + self.aggregator.update(value) + def __repr__(self): return '{}(data="{}", last_update_timestamp={})'.format( - type(self).__name__, self.data, self.last_update_timestamp + type(self).__name__, + self.aggregator.current, + self.last_update_timestamp, ) @@ -63,37 +96,31 @@ class CounterHandle(metrics_api.CounterHandle, BaseHandle): def add(self, value: metrics_api.ValueT) -> None: """See `opentelemetry.metrics.CounterHandle.add`.""" if self._validate_update(value): - if self.monotonic and value < 0: - logger.warning("Monotonic counter cannot descend.") - return - self.last_update_timestamp = time_ns() - self.data += value + self.update(value) class GaugeHandle(metrics_api.GaugeHandle, BaseHandle): def set(self, value: metrics_api.ValueT) -> None: """See `opentelemetry.metrics.GaugeHandle.set`.""" if self._validate_update(value): - if self.monotonic and value < self.data: - logger.warning("Monotonic gauge cannot descend.") - return - self.last_update_timestamp = time_ns() - self.data = value + 
self.update(value) class MeasureHandle(metrics_api.MeasureHandle, BaseHandle): def record(self, value: metrics_api.ValueT) -> None: """See `opentelemetry.metrics.MeasureHandle.record`.""" if self._validate_update(value): - if self.monotonic and value < 0: - logger.warning("Monotonic measure cannot accept negatives.") - return - self.last_update_timestamp = time_ns() - # TODO: record + self.update(value) class Metric(metrics_api.Metric): - """See `opentelemetry.metrics.Metric`.""" + """Base class for all metric types. + + Also known as metric instrument. This is the class that is used to + represent a metric that is to be continuously recorded and tracked. Each + metric has a set of handles that are created from the metric. See + `BaseHandle` for information on handles. + """ HANDLE_TYPE = BaseHandle @@ -103,17 +130,17 @@ def __init__( description: str, unit: str, value_type: Type[metrics_api.ValueT], + meter: "Meter", label_keys: Sequence[str] = (), enabled: bool = True, - monotonic: bool = False, ): self.name = name self.description = description self.unit = unit self.value_type = value_type + self.meter = meter self.label_keys = label_keys self.enabled = enabled - self.monotonic = monotonic self.handles = {} def get_handle(self, label_set: LabelSet) -> BaseHandle: @@ -121,9 +148,12 @@ def get_handle(self, label_set: LabelSet) -> BaseHandle: handle = self.handles.get(label_set) if not handle: handle = self.HANDLE_TYPE( - self.value_type, self.enabled, self.monotonic + self.value_type, + self.enabled, + # Aggregator will be created based off type of metric + self.meter.batcher.aggregator_for(self.__class__), ) - self.handles[label_set] = handle + self.handles[label_set] = handle return handle def __repr__(self): @@ -136,10 +166,6 @@ def __repr__(self): class Counter(Metric, metrics_api.Counter): """See `opentelemetry.metrics.Counter`. - - By default, counter values can only go up (monotonic). Negative inputs - will be discarded for monotonic counter metrics. 
Counter metrics that - have a monotonic option set to False allows negative inputs. """ HANDLE_TYPE = CounterHandle @@ -150,21 +176,21 @@ def __init__( description: str, unit: str, value_type: Type[metrics_api.ValueT], + meter: "Meter", label_keys: Sequence[str] = (), enabled: bool = True, - monotonic: bool = True, ): super().__init__( name, description, unit, value_type, + meter, label_keys=label_keys, enabled=enabled, - monotonic=monotonic, ) - def add(self, label_set: LabelSet, value: metrics_api.ValueT) -> None: + def add(self, value: metrics_api.ValueT, label_set: LabelSet) -> None: """See `opentelemetry.metrics.Counter.add`.""" self.get_handle(label_set).add(value) @@ -173,9 +199,6 @@ def add(self, label_set: LabelSet, value: metrics_api.ValueT) -> None: class Gauge(Metric, metrics_api.Gauge): """See `opentelemetry.metrics.Gauge`. - - By default, gauge values can go both up and down (non-monotonic). - Negative inputs will be discarded for monotonic gauge metrics. """ HANDLE_TYPE = GaugeHandle @@ -186,21 +209,21 @@ def __init__( description: str, unit: str, value_type: Type[metrics_api.ValueT], + meter: "Meter", label_keys: Sequence[str] = (), enabled: bool = True, - monotonic: bool = False, ): super().__init__( name, description, unit, value_type, + meter, label_keys=label_keys, enabled=enabled, - monotonic=monotonic, ) - def set(self, label_set: LabelSet, value: metrics_api.ValueT) -> None: + def set(self, value: metrics_api.ValueT, label_set: LabelSet) -> None: """See `opentelemetry.metrics.Gauge.set`.""" self.get_handle(label_set).set(value) @@ -208,50 +231,58 @@ def set(self, label_set: LabelSet, value: metrics_api.ValueT) -> None: class Measure(Metric, metrics_api.Measure): - """See `opentelemetry.metrics.Measure`. - - By default, measure metrics can accept both positive and negatives. - Negative inputs will be discarded when monotonic is True. 
- """ + """See `opentelemetry.metrics.Measure`.""" HANDLE_TYPE = MeasureHandle - def __init__( - self, - name: str, - description: str, - unit: str, - value_type: Type[metrics_api.ValueT], - label_keys: Sequence[str] = (), - enabled: bool = False, - monotonic: bool = False, - ): - super().__init__( - name, - description, - unit, - value_type, - label_keys=label_keys, - enabled=enabled, - monotonic=monotonic, - ) - - def record(self, label_set: LabelSet, value: metrics_api.ValueT) -> None: + def record(self, value: metrics_api.ValueT, label_set: LabelSet) -> None: """See `opentelemetry.metrics.Measure.record`.""" self.get_handle(label_set).record(value) UPDATE_FUNCTION = record +class Record: + """Container class used for processing in the `Batcher`""" + + def __init__( + self, metric: Metric, label_set: LabelSet, aggregator: Aggregator + ): + self.metric = metric + self.label_set = label_set + self.aggregator = aggregator + + # Used when getting a LabelSet with no key/values EMPTY_LABEL_SET = LabelSet() class Meter(metrics_api.Meter): - """See `opentelemetry.metrics.Meter`.""" + """See `opentelemetry.metrics.Meter`. + + Args: + batcher: The `Batcher` used for this meter. + """ - def __init__(self): - self.labels = {} + def __init__(self, batcher: Batcher = UngroupedBatcher(True)): + self.batcher = batcher + self.metrics = set() + + def collect(self) -> None: + """Collects all the metrics created with this `Meter` for export. + + Utilizes the batcher to create checkpoints of the current values in + each aggregator belonging to the metrics that were created with this + meter instance. + """ + for metric in self.metrics: + if metric.enabled: + for label_set, handle in metric.handles.items(): + # TODO: Consider storing records in memory? 
+ record = Record(metric, label_set, handle.aggregator) + # Checkpoints the current aggregators + # Applies different batching logic based on type of batcher + self.batcher.process(record) def record_batch( self, @@ -260,7 +291,7 @@ def record_batch( ) -> None: """See `opentelemetry.metrics.Meter.record_batch`.""" for metric, value in record_tuples: - metric.UPDATE_FUNCTION(label_set, value) + metric.UPDATE_FUNCTION(value, label_set) def create_metric( self, @@ -271,19 +302,20 @@ def create_metric( metric_type: Type[metrics_api.MetricT], label_keys: Sequence[str] = (), enabled: bool = True, - monotonic: bool = False, ) -> metrics_api.MetricT: """See `opentelemetry.metrics.Meter.create_metric`.""" # Ignore type b/c of mypy bug in addition to missing annotations - return metric_type( # type: ignore + metric = metric_type( # type: ignore name, description, unit, value_type, + self, label_keys=label_keys, enabled=enabled, - monotonic=monotonic, ) + self.metrics.add(metric) + return metric def get_label_set(self, labels: Dict[str, str]): """See `opentelemetry.metrics.Meter.create_metric`. @@ -295,12 +327,4 @@ def get_label_set(self, labels: Dict[str, str]): """ if len(labels) == 0: return EMPTY_LABEL_SET - # Use simple encoding for now until encoding API is implemented - encoded = tuple(sorted(labels.items())) - # If LabelSet exists for this meter in memory, use existing one - if encoded not in self.labels: - self.labels[encoded] = LabelSet(labels=labels) - return self.labels[encoded] - - -meter = Meter() + return LabelSet(labels=labels) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py index b6cb396331a..6901a4efe46 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/__init__.py @@ -15,8 +15,6 @@ from enum import Enum from typing import Sequence, Tuple -from .. 
import Metric - class MetricsExportResult(Enum): SUCCESS = 0 @@ -24,6 +22,13 @@ class MetricsExportResult(Enum): FAILED_NOT_RETRYABLE = 2 +class MetricRecord: + def __init__(self, aggregator, label_set, metric): + self.aggregator = aggregator + self.label_set = label_set + self.metric = metric + + class MetricsExporter: """Interface for exporting metrics. @@ -32,15 +37,15 @@ class MetricsExporter: """ def export( - self, metric_tuples: Sequence[Tuple[Metric, Sequence[str]]] + self, metric_records: Sequence[MetricRecord] ) -> "MetricsExportResult": """Exports a batch of telemetry data. Args: - metric_tuples: A sequence of metric pairs. A metric pair consists - of a `Metric` and a sequence of strings. The sequence of - strings will be used to get the corresponding `MetricHandle` - from the `Metric` to export. + metric_records: A sequence of `MetricRecord` s. A `MetricRecord` + contains the metric to be exported, the label set associated + with that metric, as well as the aggregator used to export the + current checkpointed value. Returns: The result of the export @@ -57,17 +62,19 @@ class ConsoleMetricsExporter(MetricsExporter): """Implementation of `MetricsExporter` that prints metrics to the console. This class can be used for diagnostic purposes. It prints the exported - metric handles to the console STDOUT. + metrics to the console STDOUT. 
""" def export( - self, metric_tuples: Sequence[Tuple[Metric, Sequence[str]]] + self, metric_records: Sequence[MetricRecord] ) -> "MetricsExportResult": - for metric, label_values in metric_tuples: - handle = metric.get_handle(label_values) + for record in metric_records: print( - '{}(data="{}", label_values="{}", metric_data={})'.format( - type(self).__name__, metric, label_values, handle + '{}(data="{}", label_set="{}", value={})'.format( + type(self).__name__, + record.metric, + record.label_set.labels, + record.aggregator.checkpoint, ) ) return MetricsExportResult.SUCCESS diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py new file mode 100644 index 00000000000..642fe1cdfe4 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/aggregate.py @@ -0,0 +1,58 @@ +# Copyright 2019, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc + + +class Aggregator(abc.ABC): + """Base class for aggregators. + + Aggregators are responsible for holding aggregated values and taking a + snapshot of these values upon export (checkpoint). 
+ """ + + def __init__(self): + self.current = None + self.checkpoint = None + + @abc.abstractmethod + def update(self, value): + """Updates the current with the new value.""" + + @abc.abstractmethod + def take_checkpoint(self): + """Stores a snapshot of the current value.""" + + @abc.abstractmethod + def merge(self, other): + """Combines two aggregator values.""" + + +class CounterAggregator(Aggregator): + """Aggregator for Counter metrics.""" + + def __init__(self): + super().__init__() + self.current = 0 + self.checkpoint = 0 + + def update(self, value): + self.current += value + + def take_checkpoint(self): + self.checkpoint = self.current + self.current = 0 + + def merge(self, other): + self.checkpoint += other.checkpoint diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/batcher.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/batcher.py new file mode 100644 index 00000000000..c81db0fe740 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/batcher.py @@ -0,0 +1,100 @@ +# Copyright 2019, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +from typing import Sequence, Type + +from opentelemetry.metrics import Counter, MetricT +from opentelemetry.sdk.metrics.export import MetricRecord +from opentelemetry.sdk.metrics.export.aggregate import ( + Aggregator, + CounterAggregator, +) + + +class Batcher(abc.ABC): + """Base class for all batcher types. 
+ + The batcher is responsible for storing the aggregators and aggregated + values received from updates from metrics in the meter. The stored values + will be sent to an exporter for exporting. + """ + + def __init__(self, stateful: bool): + self._batch_map = {} + # stateful=True indicates the batcher computes checkpoints from over + # the process lifetime. False indicates the batcher computes + # checkpoints which describe the updates of a single collection period + # (deltas) + self.stateful = stateful + + def aggregator_for(self, metric_type: Type[MetricT]) -> Aggregator: + """Returns an aggregator based on metric type. + + Aggregators keep track of and updates values when metrics get updated. + """ + # pylint:disable=R0201 + if metric_type == Counter: + return CounterAggregator() + # TODO: Add other aggregators + return CounterAggregator() + + def checkpoint_set(self) -> Sequence[MetricRecord]: + """Returns a list of MetricRecords used for exporting. + + The list of MetricRecords is a snapshot created from the current + data in all of the aggregators in this batcher. + """ + metric_records = [] + for (metric, label_set), aggregator in self._batch_map.items(): + metric_records.append(MetricRecord(aggregator, label_set, metric)) + return metric_records + + def finished_collection(self): + """Performs certain post-export logic. + + For batchers that are stateless, resets the batch map. + """ + if not self.stateful: + self._batch_map = {} + + @abc.abstractmethod + def process(self, record) -> None: + """Stores record information to be ready for exporting. + + Depending on type of batcher, performs pre-export logic, such as + filtering records based off of keys. 
+ """ + + +class UngroupedBatcher(Batcher): + """Accepts all records and passes them for exporting""" + + def process(self, record): + # Checkpoints the current aggregator value to be collected for export + record.aggregator.take_checkpoint() + batch_key = (record.metric, record.label_set) + batch_value = self._batch_map.get(batch_key) + aggregator = record.aggregator + if batch_value: + # Update the stored checkpointed value if exists. The call to merge + # here combines only identical records (same key). + batch_value.merge(aggregator) + return + if self.stateful: + # if stateful batcher, create a copy of the aggregator and update + # it with the current checkpointed value for long-term storage + aggregator = self.aggregator_for(record.metric.__class__) + aggregator.merge(record.aggregator) + self._batch_map[batch_key] = aggregator diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py new file mode 100644 index 00000000000..03c857f04d9 --- /dev/null +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/export/controller.py @@ -0,0 +1,56 @@ +# Copyright 2019, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import atexit +import threading + + +class PushController(threading.Thread): + """A push based controller, used for exporting. 
+ + Uses a worker thread that periodically collects metrics for exporting, + exports them and performs some post-processing. + """ + + daemon = True + + def __init__(self, meter, exporter, interval, shutdown_on_exit=True): + super().__init__() + self.meter = meter + self.exporter = exporter + self.interval = interval + self.finished = threading.Event() + self._atexit_handler = None + if shutdown_on_exit: + self._atexit_handler = atexit.register(self.shutdown) + self.start() + + def run(self): + while not self.finished.wait(self.interval): + self.tick() + + def shutdown(self): + self.finished.set() + self.exporter.shutdown() + if self._atexit_handler is not None: + atexit.unregister(self._atexit_handler) + self._atexit_handler = None + + def tick(self): + # Collect all of the meter's metrics to be exported + self.meter.collect() + # Export the given metrics in the batcher + self.exporter.export(self.meter.batcher.checkpoint_set()) + # Perform post-exporting logic based on batcher configuration + self.meter.batcher.finished_collection() diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py index 9829c8b33ba..6d249e65080 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/__init__.py @@ -22,11 +22,12 @@ from types import TracebackType from typing import Iterator, Optional, Sequence, Tuple, Type +from opentelemetry import context as context_api from opentelemetry import trace as trace_api -from opentelemetry.context import Context from opentelemetry.sdk import util from opentelemetry.sdk.util import BoundedDict, BoundedList from opentelemetry.trace import SpanContext, sampling +from opentelemetry.trace.propagation import get_span_key from opentelemetry.trace.status import Status, StatusCanonicalCode from opentelemetry.util import time_ns, types @@ -70,6 +71,18 @@ def shutdown(self) -> None: """Called when a 
:class:`opentelemetry.sdk.trace.Tracer` is shutdown. """ + def force_flush(self, timeout_millis: int = 30000) -> bool: + """Export all ended spans to the configured Exporter that have not yet + been exported. + + Args: + timeout_millis: The maximum amount of time to wait for spans to be + exported. + + Returns: + False if the timeout is exceeded, True otherwise. + """ + class MultiSpanProcessor(SpanProcessor): """Implementation of :class:`SpanProcessor` that forwards all received @@ -422,9 +435,7 @@ class Tracer(trace_api.Tracer): """ def __init__( - self, - source: "TracerSource", - instrumentation_info: InstrumentationInfo, + self, source: "TracerSource", instrumentation_info: InstrumentationInfo ) -> None: self.source = source self.instrumentation_info = instrumentation_info @@ -499,6 +510,8 @@ def start_span( # pylint: disable=too-many-locals ) if sampling_decision.sampled: + options = context.trace_options | trace_api.TraceOptions.SAMPLED + context.trace_options = trace_api.TraceOptions(options) if attributes is None: span_attributes = sampling_decision.attributes else: @@ -528,16 +541,31 @@ def use_span( ) -> Iterator[trace_api.Span]: """See `opentelemetry.trace.Tracer.use_span`.""" try: - span_snapshot = self.source.get_current_span() - self.source._current_span_slot.set( # pylint:disable=protected-access - span + context_snapshot = context_api.get_current() + context_api.set_current( + context_api.set_value(self.source.key, span) ) try: yield span finally: - self.source._current_span_slot.set( # pylint:disable=protected-access - span_snapshot + context_api.set_current(context_snapshot) + + except Exception as error: # pylint: disable=broad-except + if ( + span.status is None + and span._set_status_on_exception # pylint:disable=protected-access # noqa + ): + span.set_status( + Status( + canonical_code=StatusCanonicalCode.UNKNOWN, + description="{}: {}".format( + type(error).__name__, error + ), + ) ) + + raise + finally: if end_on_exit: span.end() @@ -551,7 
+579,7 @@ def __init__( ): # TODO: How should multiple TracerSources behave? Should they get their own contexts? # This could be done by adding `str(id(self))` to the slot name. - self._current_span_slot = Context.register_slot("current_span") + self.key = get_span_key(tracer_source_id=str(id(self))) self._active_span_processor = MultiSpanProcessor() self.sampler = sampler self._atexit_handler = None @@ -574,7 +602,7 @@ def get_tracer( ) def get_current_span(self) -> Span: - return self._current_span_slot.get() + return context_api.get_value(self.key) # type: ignore def add_span_processor(self, span_processor: SpanProcessor) -> None: """Registers a new :class:`SpanProcessor` for this `TracerSource`. diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py index b70fb010190..0a1b1c8041d 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/export/__init__.py @@ -14,11 +14,13 @@ import collections import logging +import sys import threading import typing from enum import Enum -from opentelemetry.context import Context +from opentelemetry.context import get_current, set_current, set_value +from opentelemetry.trace import DefaultSpan from opentelemetry.util import time_ns from .. 
import Span, SpanProcessor @@ -73,16 +75,22 @@ def on_start(self, span: Span) -> None: pass def on_end(self, span: Span) -> None: - with Context.use(suppress_instrumentation=True): - try: - self.span_exporter.export((span,)) - # pylint: disable=broad-except - except Exception: - logger.exception("Exception while exporting Span.") + backup_context = get_current() + set_current(set_value("suppress_instrumentation", True)) + try: + self.span_exporter.export((span,)) + # pylint: disable=broad-except + except Exception: + logger.exception("Exception while exporting Span.") + set_current(backup_context) def shutdown(self) -> None: self.span_exporter.shutdown() + def force_flush(self, timeout_millis: int = 30000) -> bool: + # pylint: disable=unused-argument + return True + class BatchExportSpanProcessor(SpanProcessor): """Batch span processor implementation. @@ -91,6 +99,8 @@ class BatchExportSpanProcessor(SpanProcessor): batches ended spans and pushes them to the configured `SpanExporter`. """ + _FLUSH_TOKEN_SPAN = DefaultSpan(context=None) + def __init__( self, span_exporter: SpanExporter, @@ -120,6 +130,9 @@ def __init__( ) # type: typing.Deque[Span] self.worker_thread = threading.Thread(target=self.worker, daemon=True) self.condition = threading.Condition(threading.Lock()) + self.flush_condition = threading.Condition(threading.Lock()) + # flag to indicate that there is a flush operation on progress + self._flushing = False self.schedule_delay_millis = schedule_delay_millis self.max_export_batch_size = max_export_batch_size self.max_queue_size = max_queue_size @@ -153,7 +166,10 @@ def on_end(self, span: Span) -> None: def worker(self): timeout = self.schedule_delay_millis / 1e3 while not self.done: - if len(self.queue) < self.max_export_batch_size: + if ( + len(self.queue) < self.max_export_batch_size + and not self._flushing + ): with self.condition: self.condition.wait(timeout) if not self.queue: @@ -171,37 +187,71 @@ def worker(self): timeout = 
self.schedule_delay_millis / 1e3 - duration # be sure that all spans are sent - self._flush() + self._drain_queue() def export(self) -> None: """Exports at most max_export_batch_size spans.""" idx = 0 - + notify_flush = False # currently only a single thread acts as consumer, so queue.pop() will # not raise an exception while idx < self.max_export_batch_size and self.queue: - self.spans_list[idx] = self.queue.pop() - idx += 1 - with Context.use(suppress_instrumentation=True): - try: - # Ignore type b/c the Optional[None]+slicing is too "clever" - # for mypy - self.span_exporter.export( - self.spans_list[:idx] - ) # type: ignore - # pylint: disable=broad-except - except Exception: - logger.exception("Exception while exporting Span batch.") + span = self.queue.pop() + if span is self._FLUSH_TOKEN_SPAN: + notify_flush = True + else: + self.spans_list[idx] = span + idx += 1 + backup_context = get_current() + set_current(set_value("suppress_instrumentation", True)) + try: + # Ignore type b/c the Optional[None]+slicing is too "clever" + # for mypy + self.span_exporter.export(self.spans_list[:idx]) # type: ignore + # pylint: disable=broad-except + except Exception: + logger.exception("Exception while exporting Span batch.") + set_current(backup_context) + + if notify_flush: + with self.flush_condition: + self.flush_condition.notify() # clean up list for index in range(idx): self.spans_list[index] = None - def _flush(self): - # export all elements until queue is empty + def _drain_queue(self): + """"Export all elements until queue is empty. + + Can only be called from the worker thread context because it invokes + `export` that is not thread safe. 
+ """ while self.queue: self.export() + def force_flush(self, timeout_millis: int = 30000) -> bool: + if self.done: + logger.warning("Already shutdown, ignoring call to force_flush().") + return True + + self._flushing = True + self.queue.appendleft(self._FLUSH_TOKEN_SPAN) + + # wake up worker thread + with self.condition: + self.condition.notify_all() + + # wait for token to be processed + with self.flush_condition: + ret = self.flush_condition.wait(timeout_millis / 1e3) + + self._flushing = False + + if not ret: + logger.warning("Timeout was exceeded in force_flush().") + return ret + def shutdown(self) -> None: # signal the worker thread to finish and then wait for it self.done = True @@ -219,7 +269,15 @@ class ConsoleSpanExporter(SpanExporter): spans to the console STDOUT. """ + def __init__( + self, + out: typing.IO = sys.stdout, + formatter: typing.Callable[[Span], str] = str, + ): + self.out = out + self.formatter = formatter + def export(self, spans: typing.Sequence[Span]) -> SpanExportResult: for span in spans: - print(span) + self.out.write(self.formatter(span)) return SpanExportResult.SUCCESS diff --git a/opentelemetry-sdk/tests/conftest.py b/opentelemetry-sdk/tests/conftest.py new file mode 100644 index 00000000000..59e306f1303 --- /dev/null +++ b/opentelemetry-sdk/tests/conftest.py @@ -0,0 +1,30 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from os import environ +from sys import version_info + + +def pytest_sessionstart(session): + # pylint: disable=unused-argument + if version_info < (3, 5): + # contextvars are not supported in 3.4, use thread-local storage + environ["OPENTELEMETRY_CONTEXT"] = "threadlocal_context" + else: + environ["OPENTELEMETRY_CONTEXT"] = "contextvars_context" + + +def pytest_sessionfinish(session): + # pylint: disable=unused-argument + environ.pop("OPENTELEMETRY_CONTEXT") diff --git a/opentelemetry-sdk/tests/context/propagation/test_b3_format.py b/opentelemetry-sdk/tests/context/propagation/test_b3_format.py index 12155082692..17f7fdf7cae 100644 --- a/opentelemetry-sdk/tests/context/propagation/test_b3_format.py +++ b/opentelemetry-sdk/tests/context/propagation/test_b3_format.py @@ -26,6 +26,28 @@ def get_as_list(dict_object, key): return [value] if value is not None else [] +def get_child_parent_new_carrier(old_carrier): + + parent_context = FORMAT.extract(get_as_list, old_carrier) + + parent = trace.Span("parent", parent_context) + child = trace.Span( + "child", + trace_api.SpanContext( + parent_context.trace_id, + trace.generate_span_id(), + trace_options=parent_context.trace_options, + trace_state=parent_context.trace_state, + ), + parent=parent, + ) + + new_carrier = {} + FORMAT.inject(child, dict.__setitem__, new_carrier) + + return child, parent, new_carrier + + class TestB3Format(unittest.TestCase): @classmethod def setUpClass(cls): @@ -35,40 +57,76 @@ def setUpClass(cls): cls.serialized_span_id = b3_format.format_span_id( trace.generate_span_id() ) + cls.serialized_parent_id = b3_format.format_span_id( + trace.generate_span_id() + ) def test_extract_multi_header(self): """Test the extraction of B3 headers.""" - carrier = { - FORMAT.TRACE_ID_KEY: self.serialized_trace_id, - FORMAT.SPAN_ID_KEY: self.serialized_span_id, - FORMAT.SAMPLED_KEY: "1", - } - span_context = FORMAT.extract(get_as_list, carrier) - new_carrier = {} - FORMAT.inject(span_context, dict.__setitem__, 
new_carrier) + child, parent, new_carrier = get_child_parent_new_carrier( + { + FORMAT.TRACE_ID_KEY: self.serialized_trace_id, + FORMAT.SPAN_ID_KEY: self.serialized_span_id, + FORMAT.PARENT_SPAN_ID_KEY: self.serialized_parent_id, + FORMAT.SAMPLED_KEY: "1", + } + ) + self.assertEqual( - new_carrier[FORMAT.TRACE_ID_KEY], self.serialized_trace_id + new_carrier[FORMAT.TRACE_ID_KEY], + b3_format.format_trace_id(child.context.trace_id), ) self.assertEqual( - new_carrier[FORMAT.SPAN_ID_KEY], self.serialized_span_id + new_carrier[FORMAT.SPAN_ID_KEY], + b3_format.format_span_id(child.context.span_id), + ) + self.assertEqual( + new_carrier[FORMAT.PARENT_SPAN_ID_KEY], + b3_format.format_span_id(parent.context.span_id), ) self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1") def test_extract_single_header(self): """Test the extraction from a single b3 header.""" - carrier = { - FORMAT.SINGLE_HEADER_KEY: "{}-{}".format( - self.serialized_trace_id, self.serialized_span_id - ) - } - span_context = FORMAT.extract(get_as_list, carrier) - new_carrier = {} - FORMAT.inject(span_context, dict.__setitem__, new_carrier) + child, parent, new_carrier = get_child_parent_new_carrier( + { + FORMAT.SINGLE_HEADER_KEY: "{}-{}".format( + self.serialized_trace_id, self.serialized_span_id + ) + } + ) + self.assertEqual( - new_carrier[FORMAT.TRACE_ID_KEY], self.serialized_trace_id + new_carrier[FORMAT.TRACE_ID_KEY], + b3_format.format_trace_id(child.context.trace_id), ) self.assertEqual( - new_carrier[FORMAT.SPAN_ID_KEY], self.serialized_span_id + new_carrier[FORMAT.SPAN_ID_KEY], + b3_format.format_span_id(child.context.span_id), + ) + self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1") + + child, parent, new_carrier = get_child_parent_new_carrier( + { + FORMAT.SINGLE_HEADER_KEY: "{}-{}-1-{}".format( + self.serialized_trace_id, + self.serialized_span_id, + self.serialized_parent_id, + ) + } + ) + + self.assertEqual( + new_carrier[FORMAT.TRACE_ID_KEY], + 
b3_format.format_trace_id(child.context.trace_id), + ) + self.assertEqual( + new_carrier[FORMAT.SPAN_ID_KEY], + b3_format.format_span_id(child.context.span_id), + ) + self.assertEqual( + new_carrier[FORMAT.PARENT_SPAN_ID_KEY], + b3_format.format_span_id(parent.context.span_id), ) self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1") @@ -77,17 +135,18 @@ def test_extract_header_precedence(self): headers. """ single_header_trace_id = self.serialized_trace_id[:-3] + "123" - carrier = { - FORMAT.SINGLE_HEADER_KEY: "{}-{}".format( - single_header_trace_id, self.serialized_span_id - ), - FORMAT.TRACE_ID_KEY: self.serialized_trace_id, - FORMAT.SPAN_ID_KEY: self.serialized_span_id, - FORMAT.SAMPLED_KEY: "1", - } - span_context = FORMAT.extract(get_as_list, carrier) - new_carrier = {} - FORMAT.inject(span_context, dict.__setitem__, new_carrier) + + _, _, new_carrier = get_child_parent_new_carrier( + { + FORMAT.SINGLE_HEADER_KEY: "{}-{}".format( + single_header_trace_id, self.serialized_span_id + ), + FORMAT.TRACE_ID_KEY: self.serialized_trace_id, + FORMAT.SPAN_ID_KEY: self.serialized_span_id, + FORMAT.SAMPLED_KEY: "1", + } + ) + self.assertEqual( new_carrier[FORMAT.TRACE_ID_KEY], single_header_trace_id ) @@ -95,64 +154,65 @@ def test_extract_header_precedence(self): def test_enabled_sampling(self): """Test b3 sample key variants that turn on sampling.""" for variant in ["1", "True", "true", "d"]: - carrier = { - FORMAT.TRACE_ID_KEY: self.serialized_trace_id, - FORMAT.SPAN_ID_KEY: self.serialized_span_id, - FORMAT.SAMPLED_KEY: variant, - } - span_context = FORMAT.extract(get_as_list, carrier) - new_carrier = {} - FORMAT.inject(span_context, dict.__setitem__, new_carrier) + _, _, new_carrier = get_child_parent_new_carrier( + { + FORMAT.TRACE_ID_KEY: self.serialized_trace_id, + FORMAT.SPAN_ID_KEY: self.serialized_span_id, + FORMAT.SAMPLED_KEY: variant, + } + ) + self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1") def test_disabled_sampling(self): """Test b3 sample key 
variants that turn off sampling.""" for variant in ["0", "False", "false", None]: - carrier = { - FORMAT.TRACE_ID_KEY: self.serialized_trace_id, - FORMAT.SPAN_ID_KEY: self.serialized_span_id, - FORMAT.SAMPLED_KEY: variant, - } - span_context = FORMAT.extract(get_as_list, carrier) - new_carrier = {} - FORMAT.inject(span_context, dict.__setitem__, new_carrier) + _, _, new_carrier = get_child_parent_new_carrier( + { + FORMAT.TRACE_ID_KEY: self.serialized_trace_id, + FORMAT.SPAN_ID_KEY: self.serialized_span_id, + FORMAT.SAMPLED_KEY: variant, + } + ) + self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "0") def test_flags(self): """x-b3-flags set to "1" should result in propagation.""" - carrier = { - FORMAT.TRACE_ID_KEY: self.serialized_trace_id, - FORMAT.SPAN_ID_KEY: self.serialized_span_id, - FORMAT.FLAGS_KEY: "1", - } - span_context = FORMAT.extract(get_as_list, carrier) - new_carrier = {} - FORMAT.inject(span_context, dict.__setitem__, new_carrier) + _, _, new_carrier = get_child_parent_new_carrier( + { + FORMAT.TRACE_ID_KEY: self.serialized_trace_id, + FORMAT.SPAN_ID_KEY: self.serialized_span_id, + FORMAT.FLAGS_KEY: "1", + } + ) + self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1") def test_flags_and_sampling(self): """Propagate if b3 flags and sampling are set.""" - carrier = { - FORMAT.TRACE_ID_KEY: self.serialized_trace_id, - FORMAT.SPAN_ID_KEY: self.serialized_span_id, - FORMAT.FLAGS_KEY: "1", - } - span_context = FORMAT.extract(get_as_list, carrier) - new_carrier = {} - FORMAT.inject(span_context, dict.__setitem__, new_carrier) + _, _, new_carrier = get_child_parent_new_carrier( + { + FORMAT.TRACE_ID_KEY: self.serialized_trace_id, + FORMAT.SPAN_ID_KEY: self.serialized_span_id, + FORMAT.FLAGS_KEY: "1", + } + ) + self.assertEqual(new_carrier[FORMAT.SAMPLED_KEY], "1") def test_64bit_trace_id(self): """64 bit trace ids should be padded to 128 bit trace ids.""" trace_id_64_bit = self.serialized_trace_id[:16] - carrier = { - FORMAT.TRACE_ID_KEY: trace_id_64_bit, 
- FORMAT.SPAN_ID_KEY: self.serialized_span_id, - FORMAT.FLAGS_KEY: "1", - } - span_context = FORMAT.extract(get_as_list, carrier) - new_carrier = {} - FORMAT.inject(span_context, dict.__setitem__, new_carrier) + + _, _, new_carrier = get_child_parent_new_carrier( + { + FORMAT.TRACE_ID_KEY: trace_id_64_bit, + FORMAT.SPAN_ID_KEY: self.serialized_span_id, + FORMAT.FLAGS_KEY: "1", + } + ) + self.assertEqual( new_carrier[FORMAT.TRACE_ID_KEY], "0" * 16 + trace_id_64_bit ) diff --git a/opentelemetry-sdk/tests/context/test_asyncio.py b/opentelemetry-sdk/tests/context/test_asyncio.py new file mode 100644 index 00000000000..5dc3637598e --- /dev/null +++ b/opentelemetry-sdk/tests/context/test_asyncio.py @@ -0,0 +1,154 @@ +# Copyright 2020, OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import asyncio +import unittest +from unittest.mock import patch + +from opentelemetry import context +from opentelemetry.sdk import trace +from opentelemetry.sdk.trace import export +from opentelemetry.sdk.trace.export.in_memory_span_exporter import ( + InMemorySpanExporter, +) + +try: + import contextvars # pylint: disable=unused-import + from opentelemetry.sdk.context.contextvars_context import ( + ContextVarsRuntimeContext, + ) +except ImportError: + raise unittest.SkipTest("contextvars not available") + + +_SPAN_NAMES = [ + "test_span1", + "test_span2", + "test_span3", + "test_span4", + "test_span5", +] + + +def stop_loop_when(loop, cond_func, timeout=5.0): + """Registers a periodic callback that stops the loop when cond_func() == True. + Compatible with both Tornado and asyncio. + """ + if cond_func() or timeout <= 0.0: + loop.stop() + return + + timeout -= 0.1 + loop.call_later(0.1, stop_loop_when, loop, cond_func, timeout) + + +def do_work() -> None: + context.set_current(context.set_value("say", "bar")) + + +class TestAsyncio(unittest.TestCase): + @asyncio.coroutine + def task(self, name): + with self.tracer.start_as_current_span(name): + context.set_value("say", "bar") + + def submit_another_task(self, name): + self.loop.create_task(self.task(name)) + + def setUp(self): + self.previous_context = context.get_current() + context.set_current(context.Context()) + self.tracer_source = trace.TracerSource() + self.tracer = self.tracer_source.get_tracer(__name__) + self.memory_exporter = InMemorySpanExporter() + span_processor = export.SimpleExportSpanProcessor(self.memory_exporter) + self.tracer_source.add_span_processor(span_processor) + self.loop = asyncio.get_event_loop() + + def tearDown(self): + context.set_current(self.previous_context) + + @patch( + "opentelemetry.context._RUNTIME_CONTEXT", ContextVarsRuntimeContext() + ) + def test_with_asyncio(self): + with self.tracer.start_as_current_span("asyncio_test"): + for name in _SPAN_NAMES: + 
self.submit_another_task(name) + + stop_loop_when( + self.loop, + lambda: len(self.memory_exporter.get_finished_spans()) >= 5, + timeout=5.0, + ) + self.loop.run_forever() + span_list = self.memory_exporter.get_finished_spans() + span_names_list = [span.name for span in span_list] + expected = [ + "test_span1", + "test_span2", + "test_span3", + "test_span4", + "test_span5", + "asyncio_test", + ] + self.assertCountEqual(span_names_list, expected) + span_names_list.sort() + expected.sort() + self.assertListEqual(span_names_list, expected) + expected_parent = next( + span for span in span_list if span.name == "asyncio_test" + ) + for span in span_list: + if span is expected_parent: + continue + self.assertEqual(span.parent, expected_parent) + + +class TestContextVarsContext(unittest.TestCase): + def setUp(self): + self.previous_context = context.get_current() + + def tearDown(self): + context.set_current(self.previous_context) + + @patch( + "opentelemetry.context._RUNTIME_CONTEXT", ContextVarsRuntimeContext() + ) + def test_context(self): + self.assertIsNone(context.get_value("say")) + empty = context.get_current() + second = context.set_value("say", "foo") + + self.assertEqual(context.get_value("say", context=second), "foo") + + do_work() + self.assertEqual(context.get_value("say"), "bar") + third = context.get_current() + + self.assertIsNone(context.get_value("say", context=empty)) + self.assertEqual(context.get_value("say", context=second), "foo") + self.assertEqual(context.get_value("say", context=third), "bar") + + @patch( + "opentelemetry.context._RUNTIME_CONTEXT", ContextVarsRuntimeContext() + ) + def test_set_value(self): + first = context.set_value("a", "yyy") + second = context.set_value("a", "zzz") + third = context.set_value("a", "---", first) + self.assertEqual("yyy", context.get_value("a", context=first)) + self.assertEqual("zzz", context.get_value("a", context=second)) + self.assertEqual("---", context.get_value("a", context=third)) + 
# Copyright 2020, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
from unittest.mock import patch

from opentelemetry import context
from opentelemetry.sdk.context.threadlocal_context import (
    ThreadLocalRuntimeContext,
)


def do_work() -> None:
    """Install a derived context carrying say=bar as the current context."""
    context.set_current(context.set_value("say", "bar"))


class TestThreadLocalContext(unittest.TestCase):
    """Basic get/set semantics of the thread-local runtime context."""

    def setUp(self):
        self.previous_context = context.get_current()

    def tearDown(self):
        context.set_current(self.previous_context)

    @patch(
        "opentelemetry.context._RUNTIME_CONTEXT", ThreadLocalRuntimeContext()
    )
    def test_context(self):
        self.assertIsNone(context.get_value("say"))
        snapshot_empty = context.get_current()
        with_foo = context.set_value("say", "foo")

        self.assertEqual(context.get_value("say", context=with_foo), "foo")

        do_work()
        self.assertEqual(context.get_value("say"), "bar")
        snapshot_after = context.get_current()

        # Previously captured contexts are immutable snapshots.
        self.assertIsNone(context.get_value("say", context=snapshot_empty))
        self.assertEqual(context.get_value("say", context=with_foo), "foo")
        self.assertEqual(
            context.get_value("say", context=snapshot_after), "bar"
        )

    @patch(
        "opentelemetry.context._RUNTIME_CONTEXT", ThreadLocalRuntimeContext()
    )
    def test_set_value(self):
        ctx_first = context.set_value("a", "yyy")
        ctx_second = context.set_value("a", "zzz")
        ctx_third = context.set_value("a", "---", ctx_first)
        self.assertEqual("yyy", context.get_value("a", context=ctx_first))
        self.assertEqual("zzz", context.get_value("a", context=ctx_second))
        self.assertEqual("---", context.get_value("a", context=ctx_third))
        # set_value never mutates the ambient current context.
        self.assertEqual(None, context.get_value("a"))
import unittest
from multiprocessing.dummy import Pool
from unittest.mock import patch

from opentelemetry import context
from opentelemetry.sdk import trace
from opentelemetry.sdk.context.threadlocal_context import (
    ThreadLocalRuntimeContext,
)
from opentelemetry.sdk.trace import export
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
    InMemorySpanExporter,
)


class TestThreads(unittest.TestCase):
    """Context propagation across a pool of worker threads."""

    span_names = [
        "test_span1",
        "test_span2",
        "test_span3",
        "test_span4",
        "test_span5",
    ]

    def do_work(self, name="default"):
        with self.tracer.start_as_current_span(name):
            context.set_value("say-something", "bar")

    def setUp(self):
        self.previous_context = context.get_current()
        context.set_current(context.Context())
        self.tracer_source = trace.TracerSource()
        self.tracer = self.tracer_source.get_tracer(__name__)
        self.memory_exporter = InMemorySpanExporter()
        span_processor = export.SimpleExportSpanProcessor(self.memory_exporter)
        self.tracer_source.add_span_processor(span_processor)

    def tearDown(self):
        context.set_current(self.previous_context)

    @patch(
        "opentelemetry.context._RUNTIME_CONTEXT", ThreadLocalRuntimeContext()
    )
    def test_with_threads(self):
        with self.tracer.start_as_current_span("threads_test"):
            # Five worker threads, each wrapped to carry the current context.
            pool = Pool(5)
            pool.map(
                context.with_current_context(self.do_work), self.span_names
            )
            pool.close()
            pool.join()

        finished = self.memory_exporter.get_finished_spans()
        actual_names = sorted(span.name for span in finished)
        expected_names = sorted(self.span_names + ["threads_test"])
        self.assertCountEqual(actual_names, expected_names)
        self.assertListEqual(actual_names, expected_names)

        parent_span = next(
            span for span in finished if span.name == "threads_test"
        )
        # FIXME (kept from original): confirm this parent assertion is the
        # intended behavior for the thread-local runtime context.
        for span in finished:
            if span is not parent_span:
                self.assertEqual(span.parent, parent_span)
CounterAggregator() + metric = metrics.Counter( + "available memory", + "available memory", + "bytes", + int, + meter, + ("environment",), + ) + aggregator.update(1.0) + label_set = metrics.LabelSet() + _batch_map = {} + _batch_map[(metric, label_set)] = aggregator + batcher._batch_map = _batch_map + records = batcher.checkpoint_set() + self.assertEqual(len(records), 1) + self.assertEqual(records[0].metric, metric) + self.assertEqual(records[0].label_set, label_set) + self.assertEqual(records[0].aggregator, aggregator) + + def test_checkpoint_set_empty(self): + batcher = UngroupedBatcher(True) + records = batcher.checkpoint_set() + self.assertEqual(len(records), 0) + + def test_finished_collection_stateless(self): + meter = metrics.Meter() + batcher = UngroupedBatcher(False) + aggregator = CounterAggregator() + metric = metrics.Counter( + "available memory", + "available memory", + "bytes", + int, + meter, + ("environment",), + ) + aggregator.update(1.0) + label_set = metrics.LabelSet() + _batch_map = {} + _batch_map[(metric, label_set)] = aggregator + batcher._batch_map = _batch_map + batcher.finished_collection() + self.assertEqual(len(batcher._batch_map), 0) + + def test_finished_collection_stateful(self): + meter = metrics.Meter() + batcher = UngroupedBatcher(True) + aggregator = CounterAggregator() + metric = metrics.Counter( + "available memory", + "available memory", + "bytes", + int, + meter, + ("environment",), + ) + aggregator.update(1.0) + label_set = metrics.LabelSet() + _batch_map = {} + _batch_map[(metric, label_set)] = aggregator + batcher._batch_map = _batch_map + batcher.finished_collection() + self.assertEqual(len(batcher._batch_map), 1) + + # TODO: Abstract the logic once other batchers implemented + def test_ungrouped_batcher_process_exists(self): + meter = metrics.Meter() + batcher = UngroupedBatcher(True) + aggregator = CounterAggregator() + aggregator2 = CounterAggregator() + metric = metrics.Counter( + "available memory", + "available 
memory", + "bytes", + int, + meter, + ("environment",), + ) + label_set = metrics.LabelSet() + _batch_map = {} + _batch_map[(metric, label_set)] = aggregator + aggregator2.update(1.0) + batcher._batch_map = _batch_map + record = metrics.Record(metric, label_set, aggregator2) + batcher.process(record) + self.assertEqual(len(batcher._batch_map), 1) + self.assertIsNotNone(batcher._batch_map.get((metric, label_set))) + self.assertEqual( + batcher._batch_map.get((metric, label_set)).current, 0 + ) + self.assertEqual( + batcher._batch_map.get((metric, label_set)).checkpoint, 1.0 + ) + + def test_ungrouped_batcher_process_not_exists(self): + meter = metrics.Meter() + batcher = UngroupedBatcher(True) + aggregator = CounterAggregator() + metric = metrics.Counter( + "available memory", + "available memory", + "bytes", + int, + meter, + ("environment",), + ) + label_set = metrics.LabelSet() + _batch_map = {} + aggregator.update(1.0) + batcher._batch_map = _batch_map + record = metrics.Record(metric, label_set, aggregator) + batcher.process(record) + self.assertEqual(len(batcher._batch_map), 1) + self.assertIsNotNone(batcher._batch_map.get((metric, label_set))) + self.assertEqual( + batcher._batch_map.get((metric, label_set)).current, 0 + ) + self.assertEqual( + batcher._batch_map.get((metric, label_set)).checkpoint, 1.0 + ) + + def test_ungrouped_batcher_process_not_stateful(self): + meter = metrics.Meter() + batcher = UngroupedBatcher(True) + aggregator = CounterAggregator() + metric = metrics.Counter( + "available memory", + "available memory", + "bytes", + int, + meter, + ("environment",), + ) + label_set = metrics.LabelSet() + _batch_map = {} + aggregator.update(1.0) + batcher._batch_map = _batch_map + record = metrics.Record(metric, label_set, aggregator) + batcher.process(record) + self.assertEqual(len(batcher._batch_map), 1) + self.assertIsNotNone(batcher._batch_map.get((metric, label_set))) + self.assertEqual( + batcher._batch_map.get((metric, label_set)).current, 0 + 
) + self.assertEqual( + batcher._batch_map.get((metric, label_set)).checkpoint, 1.0 + ) + + +class TestAggregator(unittest.TestCase): + # TODO: test other aggregators once implemented + def test_counter_update(self): + counter = CounterAggregator() + counter.update(1.0) + counter.update(2.0) + self.assertEqual(counter.current, 3.0) + + def test_counter_checkpoint(self): + counter = CounterAggregator() + counter.update(2.0) + counter.take_checkpoint() + self.assertEqual(counter.current, 0) + self.assertEqual(counter.checkpoint, 2.0) + + def test_counter_merge(self): + counter = CounterAggregator() + counter2 = CounterAggregator() + counter.checkpoint = 1.0 + counter2.checkpoint = 3.0 + counter.merge(counter2) + self.assertEqual(counter.checkpoint, 4.0) + + +class TestController(unittest.TestCase): + def test_push_controller(self): + meter = mock.Mock() + exporter = mock.Mock() + controller = PushController(meter, exporter, 5.0) + meter.collect.assert_not_called() + exporter.export.assert_not_called() + controller.shutdown() + self.assertTrue(controller.finished.isSet()) + exporter.shutdown.assert_any_call() diff --git a/opentelemetry-sdk/tests/metrics/test_metrics.py b/opentelemetry-sdk/tests/metrics/test_metrics.py index 81e6dd2c9d5..a887621b0cb 100644 --- a/opentelemetry-sdk/tests/metrics/test_metrics.py +++ b/opentelemetry-sdk/tests/metrics/test_metrics.py @@ -17,6 +17,7 @@ from opentelemetry import metrics as metrics_api from opentelemetry.sdk import metrics +from opentelemetry.sdk.metrics import export class TestMeter(unittest.TestCase): @@ -24,6 +25,43 @@ def test_extends_api(self): meter = metrics.Meter() self.assertIsInstance(meter, metrics_api.Meter) + def test_collect(self): + meter = metrics.Meter() + batcher_mock = mock.Mock() + meter.batcher = batcher_mock + label_keys = ("key1",) + counter = metrics.Counter( + "name", "desc", "unit", float, meter, label_keys + ) + kvp = {"key1": "value1"} + label_set = meter.get_label_set(kvp) + counter.add(label_set, 
1.0) + meter.metrics.add(counter) + meter.collect() + self.assertTrue(batcher_mock.process.called) + + def test_collect_no_metrics(self): + meter = metrics.Meter() + batcher_mock = mock.Mock() + meter.batcher = batcher_mock + meter.collect() + self.assertFalse(batcher_mock.process.called) + + def test_collect_disabled_metric(self): + meter = metrics.Meter() + batcher_mock = mock.Mock() + meter.batcher = batcher_mock + label_keys = ("key1",) + counter = metrics.Counter( + "name", "desc", "unit", float, meter, label_keys, False + ) + kvp = {"key1": "value1"} + label_set = meter.get_label_set(kvp) + counter.add(label_set, 1.0) + meter.metrics.add(counter) + meter.collect() + self.assertFalse(batcher_mock.process.called) + def test_record_batch(self): meter = metrics.Meter() label_keys = ("key1",) @@ -34,7 +72,7 @@ def test_record_batch(self): label_set = meter.get_label_set(kvp) record_tuples = [(counter, 1.0)] meter.record_batch(label_set, record_tuples) - self.assertEqual(counter.get_handle(label_set).data, 1.0) + self.assertEqual(counter.get_handle(label_set).aggregator.current, 1.0) def test_record_batch_multiple(self): meter = metrics.Meter() @@ -44,15 +82,16 @@ def test_record_batch_multiple(self): counter = metrics.Counter( "name", "desc", "unit", float, meter, label_keys ) - gauge = metrics.Gauge("name", "desc", "unit", int, label_keys) + gauge = metrics.Gauge("name", "desc", "unit", int, meter, label_keys) measure = metrics.Measure( "name", "desc", "unit", float, meter, label_keys ) record_tuples = [(counter, 1.0), (gauge, 5), (measure, 3.0)] meter.record_batch(label_set, record_tuples) - self.assertEqual(counter.get_handle(label_set).data, 1.0) - self.assertEqual(gauge.get_handle(label_set).data, 5) - self.assertEqual(measure.get_handle(label_set).data, 0) + self.assertEqual(counter.get_handle(label_set).aggregator.current, 1.0) + self.assertEqual(gauge.get_handle(label_set).aggregator.current, 5.0) + # TODO: Fix when aggregator implemented for measure + 
self.assertEqual(measure.get_handle(label_set).aggregator.current, 3.0) def test_record_batch_exists(self): meter = metrics.Meter() @@ -62,12 +101,12 @@ def test_record_batch_exists(self): counter = metrics.Counter( "name", "desc", "unit", float, meter, label_keys ) - counter.add(label_set, 1.0) + counter.add(1.0, label_set) handle = counter.get_handle(label_set) record_tuples = [(counter, 1.0)] meter.record_batch(label_set, record_tuples) self.assertEqual(counter.get_handle(label_set), handle) - self.assertEqual(handle.data, 2.0) + self.assertEqual(handle.aggregator.current, 2.0) def test_create_metric(self): meter = metrics.Meter() @@ -100,8 +139,9 @@ def test_get_label_set(self): meter = metrics.Meter() kvp = {"environment": "staging", "a": "z"} label_set = meter.get_label_set(kvp) - encoded = tuple(sorted(kvp.items())) - self.assertIs(meter.labels[encoded], label_set) + label_set2 = meter.get_label_set(kvp) + labels = set([label_set, label_set2]) + self.assertEqual(len(labels), 1) def test_get_label_set_empty(self): meter = metrics.Meter() @@ -109,13 +149,6 @@ def test_get_label_set_empty(self): label_set = meter.get_label_set(kvp) self.assertEqual(label_set, metrics.EMPTY_LABEL_SET) - def test_get_label_set_exists(self): - meter = metrics.Meter() - kvp = {"environment": "staging", "a": "z"} - label_set = meter.get_label_set(kvp) - label_set2 = meter.get_label_set(kvp) - self.assertIs(label_set, label_set2) - class TestMetric(unittest.TestCase): def test_get_handle(self): @@ -132,114 +165,131 @@ def test_get_handle(self): class TestCounter(unittest.TestCase): def test_add(self): meter = metrics.Meter() - metric = metrics.Counter("name", "desc", "unit", int, ("key",)) + metric = metrics.Counter("name", "desc", "unit", int, meter, ("key",)) kvp = {"key": "value"} label_set = meter.get_label_set(kvp) handle = metric.get_handle(label_set) - metric.add(label_set, 3) - metric.add(label_set, 2) - self.assertEqual(handle.data, 5) + metric.add(3, label_set) + 
metric.add(2, label_set) + self.assertEqual(handle.aggregator.current, 5) class TestGauge(unittest.TestCase): def test_set(self): meter = metrics.Meter() - metric = metrics.Gauge("name", "desc", "unit", int, ("key",)) + metric = metrics.Gauge("name", "desc", "unit", int, meter, ("key",)) kvp = {"key": "value"} label_set = meter.get_label_set(kvp) handle = metric.get_handle(label_set) - metric.set(label_set, 3) - self.assertEqual(handle.data, 3) - metric.set(label_set, 2) - self.assertEqual(handle.data, 2) + metric.set(3, label_set) + self.assertEqual(handle.aggregator.current, 3) + metric.set(2, label_set) + # TODO: Fix once other aggregators implemented + self.assertEqual(handle.aggregator.current, 5) class TestMeasure(unittest.TestCase): def test_record(self): meter = metrics.Meter() - metric = metrics.Measure("name", "desc", "unit", int, ("key",)) + metric = metrics.Measure("name", "desc", "unit", int, meter, ("key",)) kvp = {"key": "value"} label_set = meter.get_label_set(kvp) handle = metric.get_handle(label_set) - metric.record(label_set, 3) - # Record not implemented yet - self.assertEqual(handle.data, 0) + metric.record(3, label_set) + # TODO: Fix once other aggregators implemented + self.assertEqual(handle.aggregator.current, 3) class TestCounterHandle(unittest.TestCase): def test_add(self): - handle = metrics.CounterHandle(int, True, False) + aggregator = export.aggregate.CounterAggregator() + handle = metrics.CounterHandle(int, True, aggregator) handle.add(3) - self.assertEqual(handle.data, 3) + self.assertEqual(handle.aggregator.current, 3) def test_add_disabled(self): - handle = metrics.CounterHandle(int, False, False) + aggregator = export.aggregate.CounterAggregator() + handle = metrics.CounterHandle(int, False, aggregator) handle.add(3) - self.assertEqual(handle.data, 0) - - @mock.patch("opentelemetry.sdk.metrics.logger") - def test_add_monotonic(self, logger_mock): - handle = metrics.CounterHandle(int, True, True) - handle.add(-3) - 
self.assertEqual(handle.data, 0) - self.assertTrue(logger_mock.warning.called) + self.assertEqual(handle.aggregator.current, 0) @mock.patch("opentelemetry.sdk.metrics.logger") def test_add_incorrect_type(self, logger_mock): - handle = metrics.CounterHandle(int, True, False) + aggregator = export.aggregate.CounterAggregator() + handle = metrics.CounterHandle(int, True, aggregator) handle.add(3.0) - self.assertEqual(handle.data, 0) + self.assertEqual(handle.aggregator.current, 0) self.assertTrue(logger_mock.warning.called) + @mock.patch("opentelemetry.sdk.metrics.time_ns") + def test_update(self, time_mock): + aggregator = export.aggregate.CounterAggregator() + handle = metrics.CounterHandle(int, True, aggregator) + time_mock.return_value = 123 + handle.update(4.0) + self.assertEqual(handle.last_update_timestamp, 123) + self.assertEqual(handle.aggregator.current, 4.0) + +# TODO: fix tests once aggregator implemented class TestGaugeHandle(unittest.TestCase): def test_set(self): - handle = metrics.GaugeHandle(int, True, False) + aggregator = export.aggregate.CounterAggregator() + handle = metrics.GaugeHandle(int, True, aggregator) handle.set(3) - self.assertEqual(handle.data, 3) + self.assertEqual(handle.aggregator.current, 3) def test_set_disabled(self): - handle = metrics.GaugeHandle(int, False, False) + aggregator = export.aggregate.CounterAggregator() + handle = metrics.GaugeHandle(int, False, aggregator) handle.set(3) - self.assertEqual(handle.data, 0) - - @mock.patch("opentelemetry.sdk.metrics.logger") - def test_set_monotonic(self, logger_mock): - handle = metrics.GaugeHandle(int, True, True) - handle.set(-3) - self.assertEqual(handle.data, 0) - self.assertTrue(logger_mock.warning.called) + self.assertEqual(handle.aggregator.current, 0) @mock.patch("opentelemetry.sdk.metrics.logger") def test_set_incorrect_type(self, logger_mock): - handle = metrics.GaugeHandle(int, True, False) + aggregator = export.aggregate.CounterAggregator() + handle = 
metrics.GaugeHandle(int, True, aggregator) handle.set(3.0) - self.assertEqual(handle.data, 0) + self.assertEqual(handle.aggregator.current, 0) self.assertTrue(logger_mock.warning.called) + @mock.patch("opentelemetry.sdk.metrics.time_ns") + def test_update(self, time_mock): + aggregator = export.aggregate.CounterAggregator() + handle = metrics.GaugeHandle(int, True, aggregator) + time_mock.return_value = 123 + handle.update(4.0) + self.assertEqual(handle.last_update_timestamp, 123) + self.assertEqual(handle.aggregator.current, 4.0) + +# TODO: fix tests once aggregator implemented class TestMeasureHandle(unittest.TestCase): def test_record(self): - handle = metrics.MeasureHandle(int, False, False) + aggregator = export.aggregate.CounterAggregator() + handle = metrics.MeasureHandle(int, False, aggregator) handle.record(3) - # Record not implemented yet - self.assertEqual(handle.data, 0) + self.assertEqual(handle.aggregator.current, 0) def test_record_disabled(self): - handle = metrics.MeasureHandle(int, False, False) + aggregator = export.aggregate.CounterAggregator() + handle = metrics.MeasureHandle(int, False, aggregator) handle.record(3) - self.assertEqual(handle.data, 0) - - @mock.patch("opentelemetry.sdk.metrics.logger") - def test_record_monotonic(self, logger_mock): - handle = metrics.MeasureHandle(int, True, True) - handle.record(-3) - self.assertEqual(handle.data, 0) - self.assertTrue(logger_mock.warning.called) + self.assertEqual(handle.aggregator.current, 0) @mock.patch("opentelemetry.sdk.metrics.logger") def test_record_incorrect_type(self, logger_mock): - handle = metrics.MeasureHandle(int, True, False) + aggregator = export.aggregate.CounterAggregator() + handle = metrics.MeasureHandle(int, True, aggregator) handle.record(3.0) - self.assertEqual(handle.data, 0) + self.assertEqual(handle.aggregator.current, 0) self.assertTrue(logger_mock.warning.called) + + @mock.patch("opentelemetry.sdk.metrics.time_ns") + def test_update(self, time_mock): + aggregator = 
export.aggregate.CounterAggregator() + handle = metrics.MeasureHandle(int, True, aggregator) + time_mock.return_value = 123 + handle.update(4.0) + self.assertEqual(handle.last_update_timestamp, 123) + self.assertEqual(handle.aggregator.current, 4.0) diff --git a/opentelemetry-sdk/tests/trace/export/test_export.py b/opentelemetry-sdk/tests/trace/export/test_export.py index 54fdee2629b..e598b9680a9 100644 --- a/opentelemetry-sdk/tests/trace/export/test_export.py +++ b/opentelemetry-sdk/tests/trace/export/test_export.py @@ -14,6 +14,7 @@ import time import unittest +from logging import WARNING from unittest import mock from opentelemetry import trace as trace_api @@ -24,10 +25,16 @@ class MySpanExporter(export.SpanExporter): """Very simple span exporter used for testing.""" - def __init__(self, destination, max_export_batch_size=None): + def __init__( + self, + destination, + max_export_batch_size=None, + export_timeout_millis=0.0, + ): self.destination = destination self.max_export_batch_size = max_export_batch_size self.is_shutdown = False + self.export_timeout = export_timeout_millis / 1e3 def export(self, spans: trace.Span) -> export.SpanExportResult: if ( @@ -35,6 +42,7 @@ def export(self, spans: trace.Span) -> export.SpanExportResult: and len(spans) > self.max_export_batch_size ): raise ValueError("Batch is too big") + time.sleep(self.export_timeout) self.destination.extend(span.name for span in spans) return export.SpanExportResult.SUCCESS @@ -97,7 +105,7 @@ def _create_start_and_end_span(name, span_processor): class TestBatchExportSpanProcessor(unittest.TestCase): - def test_batch_span_processor(self): + def test_shutdown(self): spans_names_list = [] my_exporter = MySpanExporter(destination=spans_names_list) @@ -109,9 +117,50 @@ def test_batch_span_processor(self): _create_start_and_end_span(name, span_processor) span_processor.shutdown() + self.assertTrue(my_exporter.is_shutdown) + + # check that spans are exported without an explicitly call to + # 
force_flush() self.assertListEqual(span_names, spans_names_list) - self.assertTrue(my_exporter.is_shutdown) + def test_flush(self): + spans_names_list = [] + + my_exporter = MySpanExporter(destination=spans_names_list) + span_processor = export.BatchExportSpanProcessor(my_exporter) + + span_names0 = ["xxx", "bar", "foo"] + span_names1 = ["yyy", "baz", "fox"] + + for name in span_names0: + _create_start_and_end_span(name, span_processor) + + self.assertTrue(span_processor.force_flush()) + self.assertListEqual(span_names0, spans_names_list) + + # create some more spans to check that span processor still works + for name in span_names1: + _create_start_and_end_span(name, span_processor) + + self.assertTrue(span_processor.force_flush()) + self.assertListEqual(span_names0 + span_names1, spans_names_list) + + span_processor.shutdown() + + def test_flush_timeout(self): + spans_names_list = [] + + my_exporter = MySpanExporter( + destination=spans_names_list, export_timeout_millis=500 + ) + span_processor = export.BatchExportSpanProcessor(my_exporter) + + _create_start_and_end_span("foo", span_processor) + + # check that the timeout is not meet + with self.assertLogs(level=WARNING): + self.assertFalse(span_processor.force_flush(100)) + span_processor.shutdown() def test_batch_span_processor_lossless(self): """Test that no spans are lost when sending max_queue_size spans""" @@ -127,8 +176,9 @@ def test_batch_span_processor_lossless(self): for _ in range(512): _create_start_and_end_span("foo", span_processor) - span_processor.shutdown() + self.assertTrue(span_processor.force_flush()) self.assertEqual(len(spans_names_list), 512) + span_processor.shutdown() def test_batch_span_processor_many_spans(self): """Test that no spans are lost when sending many spans""" @@ -150,8 +200,9 @@ def test_batch_span_processor_many_spans(self): time.sleep(0.05) # give some time for the exporter to upload spans - span_processor.shutdown() + self.assertTrue(span_processor.force_flush()) 
self.assertEqual(len(spans_names_list), 1024) + span_processor.shutdown() def test_batch_span_processor_scheduled_delay(self): """Test that spans are exported each schedule_delay_millis""" @@ -225,3 +276,31 @@ def test_batch_span_processor_parameters(self): max_queue_size=256, max_export_batch_size=512, ) + + +class TestConsoleSpanExporter(unittest.TestCase): + def test_export(self): # pylint: disable=no-self-use + """Check that the console exporter prints spans.""" + exporter = export.ConsoleSpanExporter() + + # Mocking stdout interferes with debugging and test reporting, mock on + # the exporter instance instead. + span = trace.Span("span name", mock.Mock()) + with mock.patch.object(exporter, "out") as mock_stdout: + exporter.export([span]) + mock_stdout.write.assert_called_once_with(str(span)) + self.assertEqual(mock_stdout.write.call_count, 1) + + def test_export_custom(self): # pylint: disable=no-self-use + """Check that console exporter uses custom io, formatter.""" + mock_span_str = mock.Mock(str) + + def formatter(span): # pylint: disable=unused-argument + return mock_span_str + + mock_stdout = mock.Mock() + exporter = export.ConsoleSpanExporter( + out=mock_stdout, formatter=formatter + ) + exporter.export([trace.Span("span name", mock.Mock())]) + mock_stdout.write.assert_called_once_with(mock_span_str) diff --git a/opentelemetry-sdk/tests/trace/test_trace.py b/opentelemetry-sdk/tests/trace/test_trace.py index 53a10518aaf..a62f91a7e35 100644 --- a/opentelemetry-sdk/tests/trace/test_trace.py +++ b/opentelemetry-sdk/tests/trace/test_trace.py @@ -117,6 +117,7 @@ def test_default_sampler(self): self.assertIsInstance(root_span, trace.Span) child_span = tracer.start_span(name="child span", parent=root_span) self.assertIsInstance(child_span, trace.Span) + self.assertTrue(root_span.context.trace_options.sampled) def test_sampler_no_sampling(self): tracer_source = trace.TracerSource(sampling.ALWAYS_OFF) @@ -251,6 +252,9 @@ def test_start_span_explicit(self): 
other_parent = trace_api.SpanContext( trace_id=0x000000000000000000000000DEADBEEF, span_id=0x00000000DEADBEF0, + trace_options=trace_api.TraceOptions( + trace_api.TraceOptions.SAMPLED + ), ) self.assertIsNone(tracer.get_current_span()) @@ -630,27 +634,36 @@ def test_ended_span(self): self.assertEqual(root.name, "root") new_status = trace_api.status.Status( - trace_api.status.StatusCanonicalCode.CANCELLED, "Test description", + trace_api.status.StatusCanonicalCode.CANCELLED, "Test description" ) with self.assertLogs(level=WARNING): root.set_status(new_status) self.assertEqual( - root.status.canonical_code, - trace_api.status.StatusCanonicalCode.OK, + root.status.canonical_code, trace_api.status.StatusCanonicalCode.OK ) def test_error_status(self): - try: - with trace.TracerSource().get_tracer(__name__).start_span( - "root" - ) as root: - raise Exception("unknown") - except Exception: # pylint: disable=broad-except - pass - - self.assertIs(root.status.canonical_code, StatusCanonicalCode.UNKNOWN) - self.assertEqual(root.status.description, "Exception: unknown") + def error_status_test(context): + with self.assertRaises(AssertionError): + with context as root: + raise AssertionError("unknown") + + self.assertIs( + root.status.canonical_code, StatusCanonicalCode.UNKNOWN + ) + self.assertEqual( + root.status.description, "AssertionError: unknown" + ) + + error_status_test( + trace.TracerSource().get_tracer(__name__).start_span("root") + ) + error_status_test( + trace.TracerSource() + .get_tracer(__name__) + .start_as_current_span("root") + ) def span_event_start_fmt(span_processor_name, span_name): diff --git a/scripts/eachdist.py b/scripts/eachdist.py index 8d41315fc7a..406afb6ebfd 100755 --- a/scripts/eachdist.py +++ b/scripts/eachdist.py @@ -479,7 +479,7 @@ def lint_args(args): runsubprocess(args.dry_run, ("flake8", rootdir), check=True) execute_args( parse_subargs( - args, ("exec", "pylint {}", "--all", "--mode", "lintroots",), + args, ("exec", "pylint {}", "--all", 
"--mode", "lintroots") ) ) diff --git a/tox.ini b/tox.ini index 642a8a556b3..51eda59d70c 100644 --- a/tox.ini +++ b/tox.ini @@ -2,9 +2,9 @@ skipsdist = True skip_missing_interpreters = True envlist = - py3{4,5,6,7,8}-test-{api,sdk,example-app,ext-wsgi,ext-flask,ext-http-requests,ext-jaeger,ext-dbapi,ext-mysql,ext-pymongo,ext-zipkin,opentracing-shim} + py3{4,5,6,7,8}-test-{api,sdk,example-app,ext-wsgi,ext-flask,ext-http-requests,ext-jaeger,ext-dbapi,ext-mysql,ext-psycopg2,ext-pymongo,ext-zipkin,opentracing-shim} pypy3-test-{api,sdk,example-app,ext-wsgi,ext-flask,ext-http-requests,ext-jaeger,ext-dbapi,ext-mysql,ext-pymongo,ext-zipkin,opentracing-shim} - py3{4,5,6,7,8}-test-{api,sdk,example-app,example-basic-tracer,example-http,ext-wsgi,ext-flask,ext-http-requests,ext-jaeger,ext-dbapi,ext-mysql,ext-pymongo,ext-zipkin,opentracing-shim} + py3{4,5,6,7,8}-test-{api,sdk,example-app,example-basic-tracer,example-http,ext-wsgi,ext-flask,ext-http-requests,ext-jaeger,ext-dbapi,ext-mysql,ext-psycopg2,ext-pymongo,ext-zipkin,opentracing-shim} pypy3-test-{api,sdk,example-app,example-basic-tracer,example-http,ext-wsgi,ext-flask,ext-http-requests,ext-jaeger,ext-dbapi,ext-mysql,ext-pymongo,ext-zipkin,opentracing-shim} py3{4,5,6,7,8}-coverage @@ -15,10 +15,11 @@ envlist = py37-tracecontext py37-{mypy,mypyinstalled} docs + docker-tests [travis] python = - 3.7: py37, lint, docs + 3.7: py37, lint, docs, docker-tests [testenv] deps = @@ -39,6 +40,7 @@ changedir = test-ext-dbapi: ext/opentelemetry-ext-dbapi/tests test-ext-mysql: ext/opentelemetry-ext-mysql/tests test-ext-pymongo: ext/opentelemetry-ext-pymongo/tests + test-ext-psycopg2: ext/opentelemetry-ext-psycopg2/tests test-ext-wsgi: ext/opentelemetry-ext-wsgi/tests test-ext-zipkin: ext/opentelemetry-ext-zipkin/tests test-ext-flask: ext/opentelemetry-ext-flask/tests @@ -74,10 +76,13 @@ commands_pre = mysql: pip install {toxinidir}/ext/opentelemetry-ext-dbapi mysql: pip install {toxinidir}/ext/opentelemetry-ext-mysql pymongo: pip install 
{toxinidir}/ext/opentelemetry-ext-pymongo + psycopg2: pip install {toxinidir}/ext/opentelemetry-ext-dbapi + psycopg2: pip install {toxinidir}/ext/opentelemetry-ext-psycopg2 http-requests: pip install {toxinidir}/ext/opentelemetry-ext-http-requests jaeger: pip install {toxinidir}/opentelemetry-sdk jaeger: pip install {toxinidir}/ext/opentelemetry-ext-jaeger opentracing-shim: pip install {toxinidir}/opentelemetry-sdk {toxinidir}/ext/opentelemetry-ext-opentracing-shim + zipkin: pip install {toxinidir}/opentelemetry-sdk zipkin: pip install {toxinidir}/ext/opentelemetry-ext-zipkin ; In order to get a healthy coverage report, @@ -152,3 +157,23 @@ commands_pre = commands = {toxinidir}/scripts/tracecontext-integration-test.sh + +[testenv:docker-tests] +deps = + pytest + docker-compose >= 1.25.2 + pymongo ~= 3.1 + +changedir = + ext/opentelemetry-ext-docker-tests/tests + +commands_pre = + pip install -e {toxinidir}/opentelemetry-api \ + -e {toxinidir}/opentelemetry-sdk \ + -e {toxinidir}/ext/opentelemetry-ext-pymongo + - docker-compose up -d +commands = + pytest {posargs} + +commands_post = + docker-compose down \ No newline at end of file