Commit 720f074
Merge branch 'master' into grpc-again
2 parents: 2ae99f9 + 5fbf999

37 files changed: +353 additions, -380 deletions


dev-requirements.txt

Lines changed: 1 addition & 0 deletions
@@ -8,3 +8,4 @@ sphinx-rtd-theme~=0.4
 sphinx-autodoc-typehints~=1.10.2
 pytest!=5.2.3
 pytest-cov>=2.8
+readme-renderer~=24.0

docs/examples/basic_meter/basic_metrics.py

Lines changed: 9 additions & 9 deletions
@@ -84,21 +84,21 @@ def usage(argv):
     label_keys=("environment",),
 )

-# Labelsets are used to identify key-values that are associated with a specific
+# Labels are used to identify key-values that are associated with a specific
 # metric that you want to record. These are useful for pre-aggregation and can
 # be used to store custom dimensions pertaining to a metric
-staging_label_set = meter.get_label_set({"environment": "staging"})
-testing_label_set = meter.get_label_set({"environment": "testing"})
+staging_labels = {"environment": "staging"}
+testing_labels = {"environment": "testing"}

 # Update the metric instruments using the direct calling convention
-requests_counter.add(25, staging_label_set)
-requests_size.record(100, staging_label_set)
+requests_counter.add(25, staging_labels)
+requests_size.record(100, staging_labels)
 time.sleep(5)

-requests_counter.add(50, staging_label_set)
-requests_size.record(5000, staging_label_set)
+requests_counter.add(50, staging_labels)
+requests_size.record(5000, staging_labels)
 time.sleep(5)

-requests_counter.add(35, testing_label_set)
-requests_size.record(2, testing_label_set)
+requests_counter.add(35, testing_labels)
+requests_size.record(2, testing_labels)
 time.sleep(5)
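
Read against the removed lines, the change amounts to swapping LabelSet objects for plain dicts. A minimal before/after sketch, assuming the meter and instruments created earlier in this example (which the hunk does not show):

# Before this commit: labels had to be wrapped in a LabelSet first.
#   staging_label_set = meter.get_label_set({"environment": "staging"})
#   requests_counter.add(25, staging_label_set)

# After this commit: pass the dict of labels directly to the instrument.
staging_labels = {"environment": "staging"}
requests_counter.add(25, staging_labels)   # count 25 requests for "staging"
requests_size.record(100, staging_labels)  # record a size measurement for "staging"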

docs/examples/basic_meter/calling_conventions.py

Lines changed: 9 additions & 9 deletions
@@ -56,27 +56,27 @@
     label_keys=("environment",),
 )

-label_set = meter.get_label_set({"environment": "staging"})
+labels = {"environment": "staging"}

 print("Updating using direct calling convention...")
-# You can record metrics directly using the metric instrument. You pass in a
-# labelset that you would like to record for.
-requests_counter.add(25, label_set)
+# You can record metrics directly using the metric instrument. You pass in
+# labels that you would like to record for.
+requests_counter.add(25, labels)
 time.sleep(5)

 print("Updating using a bound instrument...")
 # You can record metrics with bound metric instruments. Bound metric
-# instruments are created by passing in a labelset. A bound metric instrument
+# instruments are created by passing in labels. A bound metric instrument
 # is essentially metric data that corresponds to a specific set of labels.
 # Therefore, getting a bound metric instrument using the same set of labels
 # will yield the same bound metric instrument.
-bound_requests_counter = requests_counter.bind(label_set)
+bound_requests_counter = requests_counter.bind(labels)
 bound_requests_counter.add(100)
 time.sleep(5)

 print("Updating using batch calling convention...")
-# You can record metrics in a batch by passing in a labelset and a sequence of
+# You can record metrics in a batch by passing in labels and a sequence of
 # (metric, value) pairs. The value would be recorded for each metric using the
-# specified labelset for each.
-meter.record_batch(label_set, ((requests_counter, 50), (clicks_counter, 70)))
+# specified labels for each.
+meter.record_batch(labels, ((requests_counter, 50), (clicks_counter, 70)))
 time.sleep(5)
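
The three calling conventions exercised in this file reduce to the sketch below; the instrument creation (requests_counter, clicks_counter) is assumed from the top of the example, which this hunk does not show:

labels = {"environment": "staging"}

# Direct calling convention: pass the labels dict on every call.
requests_counter.add(25, labels)

# Bound calling convention: bind the labels once, then record without them.
bound_requests_counter = requests_counter.bind(labels)
bound_requests_counter.add(100)

# Batch calling convention: one labels dict, many (instrument, value) pairs.
meter.record_batch(labels, ((requests_counter, 50), (clicks_counter, 70)))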

docs/examples/basic_meter/observer.py

Lines changed: 4 additions & 4 deletions
@@ -19,7 +19,7 @@
 import psutil

 from opentelemetry import metrics
-from opentelemetry.sdk.metrics import LabelSet, MeterProvider
+from opentelemetry.sdk.metrics import MeterProvider
 from opentelemetry.sdk.metrics.export import ConsoleMetricsExporter
 from opentelemetry.sdk.metrics.export.batcher import UngroupedBatcher
 from opentelemetry.sdk.metrics.export.controller import PushController
@@ -35,8 +35,8 @@
 # Callback to gather cpu usage
 def get_cpu_usage_callback(observer):
     for (number, percent) in enumerate(psutil.cpu_percent(percpu=True)):
-        label_set = meter.get_label_set({"cpu_number": str(number)})
-        observer.observe(percent, label_set)
+        labels = {"cpu_number": str(number)}
+        observer.observe(percent, labels)


 meter.register_observer(
@@ -52,7 +52,7 @@ def get_cpu_usage_callback(observer):
 # Callback to gather RAM memory usage
 def get_ram_usage_callback(observer):
     ram_percent = psutil.virtual_memory().percent
-    observer.observe(ram_percent, LabelSet())
+    observer.observe(ram_percent, {})


 meter.register_observer(
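
For observers the same substitution applies inside the callbacks. A short sketch based only on the lines shown here (the register_observer arguments are elided in this hunk, so they are left out):

import psutil

# Per-CPU usage: one label value per CPU core.
def get_cpu_usage_callback(observer):
    for number, percent in enumerate(psutil.cpu_percent(percpu=True)):
        observer.observe(percent, {"cpu_number": str(number)})

# RAM usage: no extra dimensions needed, so an empty dict replaces LabelSet().
def get_ram_usage_callback(observer):
    observer.observe(psutil.virtual_memory().percent, {})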

docs/examples/jaeger_exporter/jaeger_exporter.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 #
-# Copyright 2020, OpenTelemetry Authors
+# Copyright The OpenTelemetry Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

docs/examples/otcollector-metrics/collector.py

Lines changed: 2 additions & 2 deletions
@@ -40,8 +40,8 @@
     label_keys=("environment",),
 )

-staging_label_set = meter.get_label_set({"environment": "staging"})
-requests_counter.add(25, staging_label_set)
+staging_labels = {"environment": "staging"}
+requests_counter.add(25, staging_labels)

 print("Metrics are available now at http://localhost:9090/graph")
 input("Press any key to exit...")

docs/examples/otcollector-metrics/docker/prometheus.yaml

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-scrape_config:
+scrape_configs:
   - job_name: 'otel-collector'
     scrape_interval: 5s
     static_configs:

docs/examples/otcollector-tracer/collector.py

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 #
-# Copyright 2020, OpenTelemetry Authors
+# Copyright The OpenTelemetry Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.

docs/examples/prometheus/prometheus.py

Lines changed: 2 additions & 2 deletions
@@ -41,8 +41,8 @@
     label_keys=("environment",),
 )

-staging_label_set = meter.get_label_set({"environment": "staging"})
-requests_counter.add(25, staging_label_set)
+staging_labels = {"environment": "staging"}
+requests_counter.add(25, staging_labels)

 print("Metrics are available now at http://localhost:8000/")
 input("Press any key to exit...")

docs/getting-started.rst

Lines changed: 28 additions & 25 deletions
@@ -249,7 +249,7 @@ The following is an example of emitting metrics to console, in a similar fashion
     exporter = ConsoleMetricsExporter()
     controller = PushController(meter, exporter, 5)

-    staging_label_set = meter.get_label_set({"environment": "staging"})
+    staging_labels = {"environment": "staging"}

     requests_counter = meter.create_metric(
         name="requests",
@@ -260,10 +260,10 @@ The following is an example of emitting metrics to console, in a similar fashion
         label_keys=("environment",),
     )

-    requests_counter.add(25, staging_label_set)
+    requests_counter.add(25, staging_labels)
     time.sleep(5)

-    requests_counter.add(20, staging_label_set)
+    requests_counter.add(20, staging_labels)
     time.sleep(5)


@@ -272,8 +272,8 @@ The sleeps will cause the script to take a while, but running it should yield:
 .. code-block:: sh

     $ python metrics.py
-    ConsoleMetricsExporter(data="Counter(name="requests", description="number of requests")", label_set="(('environment', 'staging'),)", value=25)
-    ConsoleMetricsExporter(data="Counter(name="requests", description="number of requests")", label_set="(('environment', 'staging'),)", value=45)
+    ConsoleMetricsExporter(data="Counter(name="requests", description="number of requests")", labels="(('environment', 'staging'),)", value=25)
+    ConsoleMetricsExporter(data="Counter(name="requests", description="number of requests")", labels="(('environment', 'staging'),)", value=45)

 Using Prometheus
 ----------------
@@ -285,19 +285,19 @@ Let's start by bringing up a Prometheus instance ourselves, to scrape our applic

 .. code-block:: yaml

-    # prometheus.yml
+    # /tmp/prometheus.yml
     scrape_configs:
     - job_name: 'my-app'
-        scrape_interval: 5s
-        static_configs:
-        - targets: ['localhost:8000']
+      scrape_interval: 5s
+      static_configs:
+        - targets: ['localhost:8000']

 And start a docker container for it:

 .. code-block:: sh

     # --net=host will not work properly outside of Linux.
-    docker run --net=host -v ./prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus\
+    docker run --net=host -v /tmp/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus \
         --log.level=debug --config.file=/etc/prometheus/prometheus.yml

 For our Python application, we will need to install an exporter specific to Prometheus:
@@ -331,7 +331,7 @@ And use that instead of the `ConsoleMetricsExporter`:
     exporter = PrometheusMetricsExporter("MyAppPrefix")
     controller = PushController(meter, exporter, 5)

-    staging_label_set = meter.get_label_set({"environment": "staging"})
+    staging_labels = {"environment": "staging"}

     requests_counter = meter.create_metric(
         name="requests",
@@ -342,10 +342,10 @@ And use that instead of the `ConsoleMetricsExporter`:
         label_keys=("environment",),
     )

-    requests_counter.add(25, staging_label_set)
+    requests_counter.add(25, staging_labels)
     time.sleep(5)

-    requests_counter.add(20, staging_label_set)
+    requests_counter.add(20, staging_labels)
     time.sleep(5)

     # This line is added to keep the HTTP server up long enough to scrape.
@@ -371,15 +371,13 @@ To see how this works in practice, let's start the Collector locally. Write the

 .. code-block:: yaml

-    # otel-collector-config.yaml
+    # /tmp/otel-collector-config.yaml
     receivers:
         opencensus:
             endpoint: 0.0.0.0:55678
     exporters:
         logging:
             loglevel: debug
-            sampling_initial: 10
-            sampling_thereafter: 50
     processors:
         batch:
         queued_retry:
@@ -397,8 +395,8 @@ Start the docker container:

 .. code-block:: sh

-    docker run -p 55678:55678\
-    -v ./otel-collector-config.yaml:/etc/otel-collector-config.yaml\
+    docker run -p 55678:55678 \
+        -v /tmp/otel-collector-config.yaml:/etc/otel-collector-config.yaml \
         omnition/opentelemetry-collector-contrib:latest \
         --config=/etc/otel-collector-config.yaml

@@ -433,6 +431,7 @@ And execute the following script:
     )
     tracer_provider = TracerProvider()
     trace.set_tracer_provider(tracer_provider)
+    span_processor = BatchExportSpanProcessor(span_exporter)
     tracer_provider.add_span_processor(span_processor)

     # create a CollectorMetricsExporter
@@ -448,21 +447,25 @@ And execute the following script:
     meter = metrics.get_meter(__name__)
     # controller collects metrics created from meter and exports it via the
     # exporter every interval
-    controller = PushController(meter, collector_exporter, 5)
+    controller = PushController(meter, metric_exporter, 5)

     # Configure the tracer to use the collector exporter
     tracer = trace.get_tracer_provider().get_tracer(__name__)

     with tracer.start_as_current_span("foo"):
         print("Hello world!")

-    counter = meter.create_metric(
-        "requests", "number of requests", "requests", int, Counter, ("environment",),
+    requests_counter = meter.create_metric(
+        name="requests",
+        description="number of requests",
+        unit="1",
+        value_type=int,
+        metric_type=Counter,
+        label_keys=("environment",),
     )
-    # Labelsets are used to identify key-values that are associated with a specific
+    # Labels are used to identify key-values that are associated with a specific
     # metric that you want to record. These are useful for pre-aggregation and can
     # be used to store custom dimensions pertaining to a metric
-    label_set = meter.get_label_set({"environment": "staging"})
-
-    counter.add(25, label_set)
+    labels = {"environment": "staging"}
+    requests_counter.add(25, labels)
     time.sleep(10)  # give push_controller time to push metrics
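
Pieced together from the added lines, the metrics half of the corrected Collector example reads roughly as follows; metric_exporter (the CollectorMetricsExporter) and the imports are assumed from the unchanged parts of the document:

# Sketch assembled from the '+' lines of this hunk only.
meter = metrics.get_meter(__name__)
controller = PushController(meter, metric_exporter, 5)  # export every 5 seconds

requests_counter = meter.create_metric(
    name="requests",
    description="number of requests",
    unit="1",
    value_type=int,
    metric_type=Counter,
    label_keys=("environment",),
)

# Labels identify the key-values recorded alongside a specific metric.
labels = {"environment": "staging"}
requests_counter.add(25, labels)
time.sleep(10)  # give the push controller time to export before exiting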

docs/metrics_example.py

Lines changed: 2 additions & 2 deletions
@@ -17,7 +17,7 @@
     label_keys=("environment",),
 )

-staging_label_set = meter.get_label_set({"environment": "staging"})
-requests_counter.add(25, staging_label_set)
+staging_labels = {"environment": "staging"}
+requests_counter.add(25, staging_labels)

 input("Press a key to finish...\n")

ext/opentelemetry-ext-dbapi/src/opentelemetry/ext/dbapi/__init__.py

Lines changed: 4 additions & 3 deletions
@@ -23,15 +23,16 @@

 import mysql.connector
 import pyodbc
-from opentelemetry.trace import tracer_provider
+
 from opentelemetry.ext.dbapi import trace_integration
+from opentelemetry.trace import TracerProvider

 trace.set_tracer_provider(TracerProvider())
 tracer = trace.get_tracer(__name__)
 # Ex: mysql.connector
-trace_integration(tracer_provider(), mysql.connector, "connect", "mysql", "sql")
+trace_integration(tracer, mysql.connector, "connect", "mysql", "sql")
 # Ex: pyodbc
-trace_integration(tracer_provider(), pyodbc, "Connection", "odbc", "sql")
+trace_integration(tracer, pyodbc, "Connection", "odbc", "sql")

 API
 ---

ext/opentelemetry-ext-http-requests/src/opentelemetry/ext/http_requests/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -23,9 +23,9 @@

 import requests
 import opentelemetry.ext.http_requests
-from opentelemetry.trace import tracer_provider
+from opentelemetry.trace import TracerProvider

-opentelemetry.ext.http_requests.enable(tracer_provider())
+opentelemetry.ext.http_requests.enable(TracerProvider())
 response = requests.get(url='https://www.example.org/')

 Limitations
