From 7c4e633e1e715a3d08fda962ab91b20a514cc1bd Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 13:55:35 -0400 Subject: [PATCH 01/11] dependency --- .../examples/metrics/auto_collector.py | 2 +- azure_monitor/examples/metrics/standard.py | 39 ++++++++++++++++ .../azure_monitor/export/metrics/__init__.py | 45 ++++++++++++++++++- .../azure_monitor/export/trace/__init__.py | 11 +++++ 4 files changed, 95 insertions(+), 2 deletions(-) create mode 100644 azure_monitor/examples/metrics/standard.py diff --git a/azure_monitor/examples/metrics/auto_collector.py b/azure_monitor/examples/metrics/auto_collector.py index 95c060c..0e4e6e5 100644 --- a/azure_monitor/examples/metrics/auto_collector.py +++ b/azure_monitor/examples/metrics/auto_collector.py @@ -14,7 +14,7 @@ metrics.set_meter_provider(MeterProvider()) meter = metrics.get_meter(__name__) exporter = AzureMonitorMetricsExporter( - connection_string="InstrumentationKey=" + # connection_string="InstrumentationKey=" ) testing_label_set = {"environment": "testing"} diff --git a/azure_monitor/examples/metrics/standard.py b/azure_monitor/examples/metrics/standard.py new file mode 100644 index 0000000..dc248e8 --- /dev/null +++ b/azure_monitor/examples/metrics/standard.py @@ -0,0 +1,39 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import requests +import time +from opentelemetry import metrics, trace +from opentelemetry.sdk.metrics import MeterProvider +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchExportSpanProcessor +from opentelemetry.instrumentation.requests import RequestsInstrumentor + +from azure_monitor import AzureMonitorMetricsExporter, AzureMonitorSpanExporter + +metrics.set_meter_provider(MeterProvider(stateful=False)) + +RequestsInstrumentor().instrument() +meter = RequestsInstrumentor().meter + +exporter = AzureMonitorMetricsExporter( + # connection_string="InstrumentationKey=" +) + +metrics.get_meter_provider().start_pipeline(meter, exporter, 5) + +# exporter = AzureMonitorSpanExporter( +# # connection_string="InstrumentationKey=" +# ) + +# trace.set_tracer_provider(TracerProvider()) +# tracer = trace.get_tracer(__name__) +# span_processor = BatchExportSpanProcessor(exporter, schedule_delay_millis=2000) +# trace.get_tracer_provider().add_span_processor(span_processor) + +for i in range(10): + for x in range(10): + requests.get('http://example.com') + time.sleep(2) + time.sleep(5) + +input("Press any key to exit...") diff --git a/azure_monitor/src/azure_monitor/export/metrics/__init__.py b/azure_monitor/src/azure_monitor/export/metrics/__init__.py index 6aab0f6..7048090 100644 --- a/azure_monitor/src/azure_monitor/export/metrics/__init__.py +++ b/azure_monitor/src/azure_monitor/export/metrics/__init__.py @@ -37,6 +37,9 @@ class AzureMonitorMetricsExporter(BaseExporter, MetricsExporter): Args: options: :doc:`export.options` to allow configuration for the exporter """ + def __init__(self, **options): + super().__init__() + self.add_telemetry_processor(standard_metrics_processor) def export( self, metric_records: Sequence[MetricRecord] @@ -79,7 +82,10 @@ def _metric_to_envelope( value = metric_record.aggregator.checkpoint.last elif isinstance(metric, ValueRecorder): # mmsc - value = metric_record.aggregator.checkpoint.count + value = metric_record.aggregator.checkpoint.sum + _min = metric_record.aggregator.checkpoint.min + _max = metric_record.aggregator.checkpoint.max + count = metric_record.aggregator.checkpoint.count 
else: # sum or lv value = metric_record.aggregator.checkpoint @@ -90,6 +96,9 @@ def _metric_to_envelope( ns=metric.description, name=metric.name, value=value, + min=_min, + max=_max, + count=count, kind=protocol.DataPointType.MEASUREMENT.value, ) @@ -99,3 +108,37 @@ def _metric_to_envelope( data = protocol.MetricData(metrics=[data_point], properties=properties) envelope.data = protocol.Data(base_data=data, base_type="MetricData") return envelope + + +def standard_metrics_processor(envelope): + data = envelope.data.base_data + if len(data.metrics): + properties = {} + point = data.metrics[0] + if point.name == "http.client.duration": + point.name = "Dependency duration" + point.kind = protocol.DataPointType.AGGREGATION.value + properties["_MS.MetricId"] = "dependencies/duration" + properties["_MS.IsAutocollected"] = "True" + properties["cloud/roleInstance"] = utils.azure_monitor_context.get("ai.cloud.roleInstance") + properties["cloud/roleName"] = utils.azure_monitor_context.get("ai.cloud.role") + properties["Dependency.Success"] = "False" + if data.properties.get("http.status_code"): + try: + code = int(data.properties.get("http.status_code")) + if 200 <= code < 400: + properties["Dependency.Success"] = "True" + except ValueError: + pass + # TODO: Check other properties if url doesn't exist + properties["dependency/target"] = data.properties.get("http.url") + properties["Dependency.Type"] = "HTTP" + properties["dependency/resultCode"] = data.properties.get("http.status_code") + # Won't need this once Azure Monitor supports histograms + # We can't actually get the individual buckets because the bucket + # collection must happen on the SDK side + properties["dependency/performanceBucket"] = "" + # TODO: OT does not have this in semantic conventions for trace + properties["operation/synthetic"] = "" + # TODO: Add other std. metrics as implemented + data.properties = properties diff --git a/azure_monitor/src/azure_monitor/export/trace/__init__.py b/azure_monitor/src/azure_monitor/export/trace/__init__.py index dd433be..615cbf2 100644 --- a/azure_monitor/src/azure_monitor/export/trace/__init__.py +++ b/azure_monitor/src/azure_monitor/export/trace/__init__.py @@ -26,6 +26,9 @@ class AzureMonitorSpanExporter(BaseExporter, SpanExporter): Args: options: :doc:`export.options` to allow configuration for the exporter """ + def __init__(self, **options): + super().__init__() + self.add_telemetry_processor(autocollected_metrics_extractor) def export(self, spans: Sequence[Span]) -> SpanExportResult: envelopes = list(map(self._span_to_envelope, spans)) @@ -122,6 +125,7 @@ def convert_span_to_envelope(span: Span) -> protocol.Envelope: "component" in span.attributes and span.attributes["component"] == "http" ): + # TODO: check other component types (e.g. 
db) data.type = "HTTP" if "http.url" in span.attributes: url = span.attributes["http.url"] @@ -157,3 +161,10 @@ def convert_span_to_envelope(span: Span) -> protocol.Envelope: data.properties["_MS.links"] = json.dumps(links) # TODO: tracestate, tags return envelope + +def autocollected_metrics_extractor(envelope): + name = "Requests" + if envelope.data.base_type == "RemoteDependencyData": + name = "Dependencies" + envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"] = \ + "(Name:'" + name + "',Ver:'1.1')" From 76dac261e773677fe12f1b463bc24e14bc72f113 Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 16:47:05 -0400 Subject: [PATCH 02/11] remove request metrics from autocollection --- .../examples/metrics/auto_collector.py | 4 +- .../sdk/auto_collection/__init__.py | 4 +- .../sdk/auto_collection/request_metrics.py | 19 +++--- .../auto_collection/test_auto_collection.py | 8 +-- .../auto_collection/test_request_metrics.py | 60 ++++++++++++++++--- 5 files changed, 64 insertions(+), 31 deletions(-) diff --git a/azure_monitor/examples/metrics/auto_collector.py b/azure_monitor/examples/metrics/auto_collector.py index 0e4e6e5..74fcd3a 100644 --- a/azure_monitor/examples/metrics/auto_collector.py +++ b/azure_monitor/examples/metrics/auto_collector.py @@ -14,12 +14,12 @@ metrics.set_meter_provider(MeterProvider()) meter = metrics.get_meter(__name__) exporter = AzureMonitorMetricsExporter( - # connection_string="InstrumentationKey=" + connection_string="InstrumentationKey=" ) testing_label_set = {"environment": "testing"} -# Automatically collect standard metrics +# Automatically collect performance counters auto_collection = AutoCollection(meter=meter, labels=testing_label_set) metrics.get_meter_provider().start_pipeline(meter, exporter, 2) diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/__init__.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/__init__.py index 3753983..f6773ef 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/__init__.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/__init__.py @@ -24,8 +24,7 @@ class AutoCollection: - """Starts auto collection of standard metrics, including performance, - dependency and request metrics. 
+ """Starts auto collection of performance counters Args: meter: OpenTelemetry Meter @@ -35,4 +34,3 @@ class AutoCollection: def __init__(self, meter: Meter, labels: Dict[str, str]): col_type = AutoCollectionType.PERF_COUNTER self._performance_metrics = PerformanceMetrics(meter, labels, col_type) - self._request_metrics = RequestMetrics(meter, labels, col_type) diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py index 2cbfab4..4be1cc3 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py @@ -79,29 +79,26 @@ class RequestMetrics: Args: meter: OpenTelemetry Meter labels: Dictionary of labels - collection_type: Standard or Live Metrics """ def __init__( self, meter: Meter, labels: Dict[str, str], - collection_type: AutoCollectionType, ): self._meter = meter self._labels = labels # Patch the HTTPServer handler to track request information HTTPServer.__init__ = server_patch - if collection_type == AutoCollectionType.LIVE_METRICS: - meter.register_observer( - callback=self._track_request_failed_rate, - name="\\ApplicationInsights\\Requests Failed/Sec", - description="Incoming Requests Failed Rate", - unit="rps", - value_type=float, - observer_type=UpDownSumObserver, - ) + meter.register_observer( + callback=self._track_request_failed_rate, + name="\\ApplicationInsights\\Requests Failed/Sec", + description="Incoming Requests Failed Rate", + unit="rps", + value_type=float, + observer_type=UpDownSumObserver, + ) meter.register_observer( callback=self._track_request_duration, name="\\ApplicationInsights\\Request Duration", diff --git a/azure_monitor/tests/auto_collection/test_auto_collection.py b/azure_monitor/tests/auto_collection/test_auto_collection.py index 6c85d12..c59a035 100644 --- a/azure_monitor/tests/auto_collection/test_auto_collection.py +++ b/azure_monitor/tests/auto_collection/test_auto_collection.py @@ -25,16 +25,10 @@ def tearDownClass(cls): @mock.patch( "azure_monitor.sdk.auto_collection.PerformanceMetrics", autospec=True ) - @mock.patch( - "azure_monitor.sdk.auto_collection.RequestMetrics", autospec=True - ) - def test_constructor(self, mock_requests, mock_performance): + def test_constructor(self, mock_performance): """Test the constructor.""" AutoCollection(meter=self._meter, labels=self._test_labels) self.assertEqual(mock_performance.called, True) - self.assertEqual(mock_requests.called, True) self.assertEqual(mock_performance.call_args[0][0], self._meter) self.assertEqual(mock_performance.call_args[0][1], self._test_labels) - self.assertEqual(mock_requests.call_args[0][0], self._meter) - self.assertEqual(mock_requests.call_args[0][1], self._test_labels) diff --git a/azure_monitor/tests/auto_collection/test_request_metrics.py b/azure_monitor/tests/auto_collection/test_request_metrics.py index 2ec6a85..33952ec 100644 --- a/azure_monitor/tests/auto_collection/test_request_metrics.py +++ b/azure_monitor/tests/auto_collection/test_request_metrics.py @@ -9,7 +9,6 @@ from opentelemetry.sdk.metrics import MeterProvider, Observer from azure_monitor.sdk.auto_collection import request_metrics -from azure_monitor.sdk.auto_collection.utils import AutoCollectionType ORIGINAL_CONS = HTTPServer.__init__ @@ -35,11 +34,10 @@ def test_constructor(self): request_metrics_collector = request_metrics.RequestMetrics( meter=mock_meter, labels=self._test_labels, - 
collection_type=AutoCollectionType.PERF_COUNTER, ) self.assertEqual(request_metrics_collector._meter, mock_meter) self.assertEqual(request_metrics_collector._labels, self._test_labels) - self.assertEqual(mock_meter.register_observer.call_count, 2) + self.assertEqual(mock_meter.register_observer.call_count, 3) create_metric_calls = mock_meter.register_observer.call_args_list create_metric_calls[0].assert_called_with( callback=request_metrics_collector._track_request_duration, @@ -57,11 +55,18 @@ def test_constructor(self): value_type=float, ) + create_metric_calls[2].assert_called_with( + callback=request_metrics_collector._track_request_failed_rate, + name="\\ApplicationInsights\\Requests Failed/Sec", + description="Incoming Requests Failed Rate", + unit="rps", + value_type=float, + ) + def test_track_request_duration(self): request_metrics_collector = request_metrics.RequestMetrics( meter=self._meter, labels=self._test_labels, - collection_type=AutoCollectionType.PERF_COUNTER, ) request_metrics.requests_map["duration"] = 100 request_metrics.requests_map["count"] = 10 @@ -82,7 +87,6 @@ def test_track_request_duration_error(self): request_metrics_collector = request_metrics.RequestMetrics( meter=self._meter, labels=self._test_labels, - collection_type=AutoCollectionType.PERF_COUNTER, ) request_metrics.requests_map["duration"] = 100 request_metrics.requests_map["count"] = 10 @@ -104,7 +108,6 @@ def test_track_request_rate(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( meter=self._meter, labels=self._test_labels, - collection_type=AutoCollectionType.PERF_COUNTER, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_time"] = 98 @@ -127,7 +130,6 @@ def test_track_request_rate_time_none(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( meter=self._meter, labels=self._test_labels, - collection_type=AutoCollectionType.PERF_COUNTER, ) request_metrics.requests_map["last_time"] = None obs = Observer( @@ -147,7 +149,6 @@ def test_track_request_rate_error(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( meter=self._meter, labels=self._test_labels, - collection_type=AutoCollectionType.PERF_COUNTER, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_rate"] = 5.0 @@ -164,6 +165,49 @@ def test_track_request_rate_error(self, time_mock): obs.aggregators[tuple(self._test_labels.items())].current, 5.0 ) + @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") + def test_track_request_rate_failed(self, time_mock): + request_metrics_collector = request_metrics.RequestMetrics( + meter=self._meter, + labels=self._test_labels, + ) + time_mock.time.return_value = 100 + request_metrics.requests_map["last_failed_count"] = 5.0 + request_metrics.requests_map["failed_count"] = 25.0 + request_metrics.requests_map["last_time"] = 98 + obs = Observer( + callback=request_metrics_collector._track_request_failed_rate, + name="\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Requests/Sec", + description="Incoming Requests Average Execution Rate", + unit="rps", + value_type=float, + ) + request_metrics_collector._track_request_failed_rate(obs) + self.assertEqual( + obs.aggregators[tuple(self._test_labels.items())].current, 10.0 + ) + + @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") + def test_track_request_rate_failed_error(self, time_mock): + request_metrics_collector = request_metrics.RequestMetrics( + meter=self._meter, + labels=self._test_labels, + ) + 
time_mock.time.return_value = 100 + request_metrics.requests_map["last_rate"] = 5.0 + request_metrics.requests_map["last_time"] = 100 + obs = Observer( + callback=request_metrics_collector._track_request_failed_rate, + name="\\ASP.NET Applications(??APP_W3SVC_PROC??)\\Requests/Sec", + description="Incoming Requests Average Execution Rate", + unit="rps", + value_type=float, + ) + request_metrics_collector._track_request_failed_rate(obs) + self.assertEqual( + obs.aggregators[tuple(self._test_labels.items())].current, 5.0 + ) + def test_request_patch(self): map = request_metrics.requests_map # pylint: disable=redefined-builtin func = mock.Mock() From a172b29d32f88a2bf28e9beb453e41f65b609c8c Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 18:02:46 -0400 Subject: [PATCH 03/11] add trace tests --- azure_monitor/CHANGELOG.md | 3 ++ .../azure_monitor/export/trace/__init__.py | 6 +-- azure_monitor/tests/trace/test_trace.py | 41 ++++++++++++++++++- 3 files changed, 45 insertions(+), 5 deletions(-) diff --git a/azure_monitor/CHANGELOG.md b/azure_monitor/CHANGELOG.md index c81c6c8..3d71374 100644 --- a/azure_monitor/CHANGELOG.md +++ b/azure_monitor/CHANGELOG.md @@ -2,6 +2,9 @@ ## Unreleased +- Add support for standard metrics (dependencies) + ([#115](https://github.com/microsoft/opentelemetry-azure-monitor-python/pull/115)) + ## 0.5b.0 Released 2020-09-24 diff --git a/azure_monitor/src/azure_monitor/export/trace/__init__.py b/azure_monitor/src/azure_monitor/export/trace/__init__.py index 615cbf2..d926e27 100644 --- a/azure_monitor/src/azure_monitor/export/trace/__init__.py +++ b/azure_monitor/src/azure_monitor/export/trace/__init__.py @@ -27,8 +27,8 @@ class AzureMonitorSpanExporter(BaseExporter, SpanExporter): options: :doc:`export.options` to allow configuration for the exporter """ def __init__(self, **options): - super().__init__() - self.add_telemetry_processor(autocollected_metrics_extractor) + super().__init__(**options) + self.add_telemetry_processor(indicate_processed_by_metric_extractors) def export(self, spans: Sequence[Span]) -> SpanExportResult: envelopes = list(map(self._span_to_envelope, spans)) @@ -162,7 +162,7 @@ def convert_span_to_envelope(span: Span) -> protocol.Envelope: # TODO: tracestate, tags return envelope -def autocollected_metrics_extractor(envelope): +def indicate_processed_by_metric_extractors(envelope): name = "Requests" if envelope.data.base_type == "RemoteDependencyData": name = "Dependencies" diff --git a/azure_monitor/tests/trace/test_trace.py b/azure_monitor/tests/trace/test_trace.py index e918b92..3329e22 100644 --- a/azure_monitor/tests/trace/test_trace.py +++ b/azure_monitor/tests/trace/test_trace.py @@ -15,7 +15,10 @@ from opentelemetry.trace.status import Status, StatusCanonicalCode from azure_monitor.export import ExportResult -from azure_monitor.export.trace import AzureMonitorSpanExporter +from azure_monitor.export.trace import ( + AzureMonitorSpanExporter, + indicate_processed_by_metric_extractors +) from azure_monitor.options import ExporterOptions TEST_FOLDER = os.path.abspath(".test") @@ -67,6 +70,11 @@ def test_constructor(self): exporter = AzureMonitorSpanExporter( instrumentation_key="4321abcd-5678-4efa-8abc-1234567890ab", storage_path=os.path.join(TEST_FOLDER, self.id()), + storage_max_size=50, + storage_maintenance_period=100, + storage_retention_period=200, + proxies={"asd":"123"}, + timeout=5.0, ) self.assertIsInstance(exporter.options, ExporterOptions) self.assertEqual( @@ -74,8 +82,24 @@ def test_constructor(self): 
"4321abcd-5678-4efa-8abc-1234567890ab", ) self.assertEqual( - exporter.options.storage_path, os.path.join(TEST_FOLDER, self.id()) + exporter.storage.path, os.path.join(TEST_FOLDER, self.id()) ) + self.assertEqual( + exporter.storage.max_size, 50 + ) + self.assertEqual( + exporter.storage.maintenance_period, 100 + ) + self.assertEqual( + exporter.storage.retention_period, 200 + ) + self.assertEqual( + exporter.options.proxies, {"asd":"123"} + ) + self.assertEqual( + exporter.options.timeout, 5.0 + ) + self.assertEqual(exporter._telemetry_processors[0], indicate_processed_by_metric_extractors) def test_export_empty(self): exporter = self._exporter @@ -121,6 +145,7 @@ def test_export_success(self): storage_mock = mock.Mock() exporter._transmit_from_storage = storage_mock exporter.export([test_span]) + self.assertEqual(len(exporter._telemetry_processors), 1) self.assertEqual(storage_mock.call_count, 1) self.assertEqual(len(os.listdir(exporter.storage.path)), 0) @@ -164,6 +189,18 @@ def test_export_not_retryable(self): result = exporter.export([test_span]) self.assertEqual(result, SpanExportResult.FAILURE) + def test_indicate_processed_by_metric_extractors(self): + envelope = mock.Mock() + envelope.data.base_type = "RemoteDependencyData" + envelope.data.base_data.properties = {} + indicate_processed_by_metric_extractors(envelope) + self.assertEqual(envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"], + "(Name:'Dependencies',Ver:'1.1')") + envelope.data.base_type = "RequestData" + indicate_processed_by_metric_extractors(envelope) + self.assertEqual(envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"], + "(Name:'Requests',Ver:'1.1')") + def test_span_to_envelope_none(self): exporter = self._exporter self.assertIsNone(exporter._span_to_envelope(None)) From cee9cc4a6d9e4abbfe044d797c8ae355448c07be Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 18:12:57 -0400 Subject: [PATCH 04/11] commit --- README.md | 6 +- azure_monitor/CHANGELOG.md | 3 - azure_monitor/examples/metrics/standard.py | 39 ---- .../azure_monitor/export/trace/__init__.py | 185 ++++++------------ 4 files changed, 61 insertions(+), 172 deletions(-) delete mode 100644 azure_monitor/examples/metrics/standard.py diff --git a/README.md b/README.md index e9c47a6..6764736 100644 --- a/README.md +++ b/README.md @@ -47,11 +47,11 @@ with tracer.start_as_current_span('hello'): print('Hello World!') ``` -#### Integrations +#### Instrumentations -OpenTelemetry also supports several [integrations](https://github.com/open-telemetry/opentelemetry-python/tree/master/ext) which allows to integrate with third party libraries. +OpenTelemetry also supports several [instrumentations](https://github.com/open-telemetry/opentelemetry-python/tree/master/instrumentation) which allows to instrument with third party libraries. -This example shows how to integrate with the [requests](https://2.python-requests.org/en/master/)_ library. +This example shows how to instrument with the [requests](https://2.python-requests.org/en/master/)_ library. * Create an Azure Monitor resource and get the instrumentation key, more information can be found [here](https://docs.microsoft.com/azure/azure-monitor/app/create-new-resource). * Install the `requests` integration package using ``pip install opentelemetry-ext-http-requests``. 
diff --git a/azure_monitor/CHANGELOG.md b/azure_monitor/CHANGELOG.md index 3d71374..c81c6c8 100644 --- a/azure_monitor/CHANGELOG.md +++ b/azure_monitor/CHANGELOG.md @@ -2,9 +2,6 @@ ## Unreleased -- Add support for standard metrics (dependencies) - ([#115](https://github.com/microsoft/opentelemetry-azure-monitor-python/pull/115)) - ## 0.5b.0 Released 2020-09-24 diff --git a/azure_monitor/examples/metrics/standard.py b/azure_monitor/examples/metrics/standard.py deleted file mode 100644 index dc248e8..0000000 --- a/azure_monitor/examples/metrics/standard.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. -import requests -import time -from opentelemetry import metrics, trace -from opentelemetry.sdk.metrics import MeterProvider -from opentelemetry.sdk.trace import TracerProvider -from opentelemetry.sdk.trace.export import BatchExportSpanProcessor -from opentelemetry.instrumentation.requests import RequestsInstrumentor - -from azure_monitor import AzureMonitorMetricsExporter, AzureMonitorSpanExporter - -metrics.set_meter_provider(MeterProvider(stateful=False)) - -RequestsInstrumentor().instrument() -meter = RequestsInstrumentor().meter - -exporter = AzureMonitorMetricsExporter( - # connection_string="InstrumentationKey=" -) - -metrics.get_meter_provider().start_pipeline(meter, exporter, 5) - -# exporter = AzureMonitorSpanExporter( -# # connection_string="InstrumentationKey=" -# ) - -# trace.set_tracer_provider(TracerProvider()) -# tracer = trace.get_tracer(__name__) -# span_processor = BatchExportSpanProcessor(exporter, schedule_delay_millis=2000) -# trace.get_tracer_provider().add_span_processor(span_processor) - -for i in range(10): - for x in range(10): - requests.get('http://example.com') - time.sleep(2) - time.sleep(5) - -input("Press any key to exit...") diff --git a/azure_monitor/src/azure_monitor/export/trace/__init__.py b/azure_monitor/src/azure_monitor/export/trace/__init__.py index d926e27..6aab0f6 100644 --- a/azure_monitor/src/azure_monitor/export/trace/__init__.py +++ b/azure_monitor/src/azure_monitor/export/trace/__init__.py @@ -5,33 +5,43 @@ from typing import Sequence from urllib.parse import urlparse -from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult +from opentelemetry.sdk.metrics import ( + Counter, + SumObserver, + UpDownCounter, + UpDownSumObserver, + ValueObserver, + ValueRecorder, +) +from opentelemetry.sdk.metrics.export import ( + MetricRecord, + MetricsExporter, + MetricsExportResult, +) from opentelemetry.sdk.util import ns_to_iso_str -from opentelemetry.trace import Span, SpanKind -from opentelemetry.trace.status import StatusCanonicalCode +from opentelemetry.util import time_ns from azure_monitor import protocol, utils from azure_monitor.export import ( BaseExporter, ExportResult, - get_trace_export_result, + get_metrics_export_result, ) logger = logging.getLogger(__name__) -class AzureMonitorSpanExporter(BaseExporter, SpanExporter): - """Azure Monitor span exporter for OpenTelemetry. +class AzureMonitorMetricsExporter(BaseExporter, MetricsExporter): + """Azure Monitor metrics exporter for OpenTelemetry. 
Args: options: :doc:`export.options` to allow configuration for the exporter """ - def __init__(self, **options): - super().__init__(**options) - self.add_telemetry_processor(indicate_processed_by_metric_extractors) - def export(self, spans: Sequence[Span]) -> SpanExportResult: - envelopes = list(map(self._span_to_envelope, spans)) + def export( + self, metric_records: Sequence[MetricRecord] + ) -> MetricsExportResult: + envelopes = list(map(self._metric_to_envelope, metric_records)) envelopes = list( map( lambda x: x.to_dict(), @@ -45,126 +55,47 @@ def export(self, spans: Sequence[Span]) -> SpanExportResult: if result == ExportResult.SUCCESS: # Try to send any cached events self._transmit_from_storage() - return get_trace_export_result(result) + return get_metrics_export_result(result) except Exception: # pylint: disable=broad-except logger.exception("Exception occurred while exporting the data.") - return get_trace_export_result(ExportResult.FAILED_NOT_RETRYABLE) - - # pylint: disable=too-many-statements - # pylint: disable=too-many-branches - def _span_to_envelope(self, span: Span) -> protocol.Envelope: - if not span: - return None - envelope = convert_span_to_envelope(span) - envelope.ikey = self.options.instrumentation_key - return envelope + return get_metrics_export_result(ExportResult.FAILED_NOT_RETRYABLE) + def _metric_to_envelope( + self, metric_record: MetricRecord + ) -> protocol.Envelope: -# pylint: disable=too-many-statements -# pylint: disable=too-many-branches -def convert_span_to_envelope(span: Span) -> protocol.Envelope: - if not span: - return None - envelope = protocol.Envelope( - ikey="", - tags=dict(utils.azure_monitor_context), - time=ns_to_iso_str(span.start_time), - ) - envelope.tags["ai.operation.id"] = "{:032x}".format(span.context.trace_id) - parent = span.parent - if isinstance(parent, Span): - parent = parent.context - if parent: - envelope.tags["ai.operation.parentId"] = "{:016x}".format( - parent.span_id - ) - if span.kind in (SpanKind.CONSUMER, SpanKind.SERVER): - envelope.name = "Microsoft.ApplicationInsights.Request" - data = protocol.Request( - id="{:016x}".format(span.context.span_id), - duration=utils.ns_to_duration(span.end_time - span.start_time), - response_code=str(span.status.canonical_code.value), - success=span.status.canonical_code - == StatusCanonicalCode.OK, # Modify based off attributes or Status - properties={}, - ) - envelope.data = protocol.Data(base_data=data, base_type="RequestData") - if "http.method" in span.attributes: - data.name = span.attributes["http.method"] - if "http.route" in span.attributes: - data.name = data.name + " " + span.attributes["http.route"] - envelope.tags["ai.operation.name"] = data.name - data.properties["request.name"] = data.name - elif "http.path" in span.attributes: - data.properties["request.name"] = ( - data.name + " " + span.attributes["http.path"] - ) - if "http.url" in span.attributes: - data.url = span.attributes["http.url"] - data.properties["request.url"] = span.attributes["http.url"] - if "http.status_code" in span.attributes: - status_code = span.attributes["http.status_code"] - data.response_code = str(status_code) - data.success = 200 <= status_code < 400 - else: - envelope.name = "Microsoft.ApplicationInsights.RemoteDependency" - data = protocol.RemoteDependency( - name=span.name, - id="{:016x}".format(span.context.span_id), - result_code=str(span.status.canonical_code.value), - duration=utils.ns_to_duration(span.end_time - span.start_time), - success=span.status.canonical_code - == 
StatusCanonicalCode.OK, # Modify based off attributes or Status - properties={}, + if not metric_record: + return None + envelope = protocol.Envelope( + ikey=self.options.instrumentation_key, + tags=dict(utils.azure_monitor_context), + time=ns_to_iso_str(metric_record.aggregator.last_update_timestamp), ) - envelope.data = protocol.Data( - base_data=data, base_type="RemoteDependencyData" + envelope.name = "Microsoft.ApplicationInsights.Metric" + value = 0 + metric = metric_record.instrument + if isinstance(metric, ValueObserver): + # mmscl + value = metric_record.aggregator.checkpoint.last + elif isinstance(metric, ValueRecorder): + # mmsc + value = metric_record.aggregator.checkpoint.count + else: + # sum or lv + value = metric_record.aggregator.checkpoint + if value is None: + logger.warning("Value is none. Default to 0.") + value = 0 + data_point = protocol.DataPoint( + ns=metric.description, + name=metric.name, + value=value, + kind=protocol.DataPointType.MEASUREMENT.value, ) - if span.kind in (SpanKind.CLIENT, SpanKind.PRODUCER): - if ( - "component" in span.attributes - and span.attributes["component"] == "http" - ): - # TODO: check other component types (e.g. db) - data.type = "HTTP" - if "http.url" in span.attributes: - url = span.attributes["http.url"] - # data is the url - data.data = url - parse_url = urlparse(url) - # TODO: error handling, probably put scheme as well - # target matches authority (host:port) - data.target = parse_url.netloc - if "http.method" in span.attributes: - # name is METHOD/path - data.name = ( - span.attributes["http.method"] + "/" + parse_url.path - ) - if "http.status_code" in span.attributes: - status_code = span.attributes["http.status_code"] - data.result_code = str(status_code) - data.success = 200 <= status_code < 400 - else: # SpanKind.INTERNAL - data.type = "InProc" - data.success = True - for key in span.attributes: - # This removes redundant data from ApplicationInsights - if key.startswith("http."): - continue - data.properties[key] = span.attributes[key] - if span.links: - links = [] - for link in span.links: - operation_id = "{:032x}".format(link.context.trace_id) - span_id = "{:016x}".format(link.context.span_id) - links.append({"operation_Id": operation_id, "id": span_id}) - data.properties["_MS.links"] = json.dumps(links) - # TODO: tracestate, tags - return envelope -def indicate_processed_by_metric_extractors(envelope): - name = "Requests" - if envelope.data.base_type == "RemoteDependencyData": - name = "Dependencies" - envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"] = \ - "(Name:'" + name + "',Ver:'1.1')" + properties = {} + for label_tuple in metric_record.labels: + properties[label_tuple[0]] = label_tuple[1] + data = protocol.MetricData(metrics=[data_point], properties=properties) + envelope.data = protocol.Data(base_data=data, base_type="MetricData") + return envelope From d6e0d80dd137953a89a6c064e6b98aae06f83c1d Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 18:14:49 -0400 Subject: [PATCH 05/11] fix --- .../azure_monitor/export/metrics/__init__.py | 45 +---- .../azure_monitor/export/trace/__init__.py | 185 ++++++++++++------ 2 files changed, 128 insertions(+), 102 deletions(-) diff --git a/azure_monitor/src/azure_monitor/export/metrics/__init__.py b/azure_monitor/src/azure_monitor/export/metrics/__init__.py index 7048090..6aab0f6 100644 --- a/azure_monitor/src/azure_monitor/export/metrics/__init__.py +++ b/azure_monitor/src/azure_monitor/export/metrics/__init__.py @@ -37,9 +37,6 @@ class 
AzureMonitorMetricsExporter(BaseExporter, MetricsExporter): Args: options: :doc:`export.options` to allow configuration for the exporter """ - def __init__(self, **options): - super().__init__() - self.add_telemetry_processor(standard_metrics_processor) def export( self, metric_records: Sequence[MetricRecord] @@ -82,10 +79,7 @@ def _metric_to_envelope( value = metric_record.aggregator.checkpoint.last elif isinstance(metric, ValueRecorder): # mmsc - value = metric_record.aggregator.checkpoint.sum - _min = metric_record.aggregator.checkpoint.min - _max = metric_record.aggregator.checkpoint.max - count = metric_record.aggregator.checkpoint.count + value = metric_record.aggregator.checkpoint.count else: # sum or lv value = metric_record.aggregator.checkpoint @@ -96,9 +90,6 @@ def _metric_to_envelope( ns=metric.description, name=metric.name, value=value, - min=_min, - max=_max, - count=count, kind=protocol.DataPointType.MEASUREMENT.value, ) @@ -108,37 +99,3 @@ def _metric_to_envelope( data = protocol.MetricData(metrics=[data_point], properties=properties) envelope.data = protocol.Data(base_data=data, base_type="MetricData") return envelope - - -def standard_metrics_processor(envelope): - data = envelope.data.base_data - if len(data.metrics): - properties = {} - point = data.metrics[0] - if point.name == "http.client.duration": - point.name = "Dependency duration" - point.kind = protocol.DataPointType.AGGREGATION.value - properties["_MS.MetricId"] = "dependencies/duration" - properties["_MS.IsAutocollected"] = "True" - properties["cloud/roleInstance"] = utils.azure_monitor_context.get("ai.cloud.roleInstance") - properties["cloud/roleName"] = utils.azure_monitor_context.get("ai.cloud.role") - properties["Dependency.Success"] = "False" - if data.properties.get("http.status_code"): - try: - code = int(data.properties.get("http.status_code")) - if 200 <= code < 400: - properties["Dependency.Success"] = "True" - except ValueError: - pass - # TODO: Check other properties if url doesn't exist - properties["dependency/target"] = data.properties.get("http.url") - properties["Dependency.Type"] = "HTTP" - properties["dependency/resultCode"] = data.properties.get("http.status_code") - # Won't need this once Azure Monitor supports histograms - # We can't actually get the individual buckets because the bucket - # collection must happen on the SDK side - properties["dependency/performanceBucket"] = "" - # TODO: OT does not have this in semantic conventions for trace - properties["operation/synthetic"] = "" - # TODO: Add other std. 
metrics as implemented - data.properties = properties diff --git a/azure_monitor/src/azure_monitor/export/trace/__init__.py b/azure_monitor/src/azure_monitor/export/trace/__init__.py index 6aab0f6..d926e27 100644 --- a/azure_monitor/src/azure_monitor/export/trace/__init__.py +++ b/azure_monitor/src/azure_monitor/export/trace/__init__.py @@ -5,43 +5,33 @@ from typing import Sequence from urllib.parse import urlparse -from opentelemetry.sdk.metrics import ( - Counter, - SumObserver, - UpDownCounter, - UpDownSumObserver, - ValueObserver, - ValueRecorder, -) -from opentelemetry.sdk.metrics.export import ( - MetricRecord, - MetricsExporter, - MetricsExportResult, -) +from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult from opentelemetry.sdk.util import ns_to_iso_str -from opentelemetry.util import time_ns +from opentelemetry.trace import Span, SpanKind +from opentelemetry.trace.status import StatusCanonicalCode from azure_monitor import protocol, utils from azure_monitor.export import ( BaseExporter, ExportResult, - get_metrics_export_result, + get_trace_export_result, ) logger = logging.getLogger(__name__) -class AzureMonitorMetricsExporter(BaseExporter, MetricsExporter): - """Azure Monitor metrics exporter for OpenTelemetry. +class AzureMonitorSpanExporter(BaseExporter, SpanExporter): + """Azure Monitor span exporter for OpenTelemetry. Args: options: :doc:`export.options` to allow configuration for the exporter """ + def __init__(self, **options): + super().__init__(**options) + self.add_telemetry_processor(indicate_processed_by_metric_extractors) - def export( - self, metric_records: Sequence[MetricRecord] - ) -> MetricsExportResult: - envelopes = list(map(self._metric_to_envelope, metric_records)) + def export(self, spans: Sequence[Span]) -> SpanExportResult: + envelopes = list(map(self._span_to_envelope, spans)) envelopes = list( map( lambda x: x.to_dict(), @@ -55,47 +45,126 @@ def export( if result == ExportResult.SUCCESS: # Try to send any cached events self._transmit_from_storage() - return get_metrics_export_result(result) + return get_trace_export_result(result) except Exception: # pylint: disable=broad-except logger.exception("Exception occurred while exporting the data.") - return get_metrics_export_result(ExportResult.FAILED_NOT_RETRYABLE) - - def _metric_to_envelope( - self, metric_record: MetricRecord - ) -> protocol.Envelope: + return get_trace_export_result(ExportResult.FAILED_NOT_RETRYABLE) - if not metric_record: + # pylint: disable=too-many-statements + # pylint: disable=too-many-branches + def _span_to_envelope(self, span: Span) -> protocol.Envelope: + if not span: return None - envelope = protocol.Envelope( - ikey=self.options.instrumentation_key, - tags=dict(utils.azure_monitor_context), - time=ns_to_iso_str(metric_record.aggregator.last_update_timestamp), + envelope = convert_span_to_envelope(span) + envelope.ikey = self.options.instrumentation_key + return envelope + + +# pylint: disable=too-many-statements +# pylint: disable=too-many-branches +def convert_span_to_envelope(span: Span) -> protocol.Envelope: + if not span: + return None + envelope = protocol.Envelope( + ikey="", + tags=dict(utils.azure_monitor_context), + time=ns_to_iso_str(span.start_time), + ) + envelope.tags["ai.operation.id"] = "{:032x}".format(span.context.trace_id) + parent = span.parent + if isinstance(parent, Span): + parent = parent.context + if parent: + envelope.tags["ai.operation.parentId"] = "{:016x}".format( + parent.span_id + ) + if span.kind in (SpanKind.CONSUMER, 
SpanKind.SERVER): + envelope.name = "Microsoft.ApplicationInsights.Request" + data = protocol.Request( + id="{:016x}".format(span.context.span_id), + duration=utils.ns_to_duration(span.end_time - span.start_time), + response_code=str(span.status.canonical_code.value), + success=span.status.canonical_code + == StatusCanonicalCode.OK, # Modify based off attributes or Status + properties={}, ) - envelope.name = "Microsoft.ApplicationInsights.Metric" - value = 0 - metric = metric_record.instrument - if isinstance(metric, ValueObserver): - # mmscl - value = metric_record.aggregator.checkpoint.last - elif isinstance(metric, ValueRecorder): - # mmsc - value = metric_record.aggregator.checkpoint.count - else: - # sum or lv - value = metric_record.aggregator.checkpoint - if value is None: - logger.warning("Value is none. Default to 0.") - value = 0 - data_point = protocol.DataPoint( - ns=metric.description, - name=metric.name, - value=value, - kind=protocol.DataPointType.MEASUREMENT.value, + envelope.data = protocol.Data(base_data=data, base_type="RequestData") + if "http.method" in span.attributes: + data.name = span.attributes["http.method"] + if "http.route" in span.attributes: + data.name = data.name + " " + span.attributes["http.route"] + envelope.tags["ai.operation.name"] = data.name + data.properties["request.name"] = data.name + elif "http.path" in span.attributes: + data.properties["request.name"] = ( + data.name + " " + span.attributes["http.path"] + ) + if "http.url" in span.attributes: + data.url = span.attributes["http.url"] + data.properties["request.url"] = span.attributes["http.url"] + if "http.status_code" in span.attributes: + status_code = span.attributes["http.status_code"] + data.response_code = str(status_code) + data.success = 200 <= status_code < 400 + else: + envelope.name = "Microsoft.ApplicationInsights.RemoteDependency" + data = protocol.RemoteDependency( + name=span.name, + id="{:016x}".format(span.context.span_id), + result_code=str(span.status.canonical_code.value), + duration=utils.ns_to_duration(span.end_time - span.start_time), + success=span.status.canonical_code + == StatusCanonicalCode.OK, # Modify based off attributes or Status + properties={}, ) + envelope.data = protocol.Data( + base_data=data, base_type="RemoteDependencyData" + ) + if span.kind in (SpanKind.CLIENT, SpanKind.PRODUCER): + if ( + "component" in span.attributes + and span.attributes["component"] == "http" + ): + # TODO: check other component types (e.g. 
db) + data.type = "HTTP" + if "http.url" in span.attributes: + url = span.attributes["http.url"] + # data is the url + data.data = url + parse_url = urlparse(url) + # TODO: error handling, probably put scheme as well + # target matches authority (host:port) + data.target = parse_url.netloc + if "http.method" in span.attributes: + # name is METHOD/path + data.name = ( + span.attributes["http.method"] + "/" + parse_url.path + ) + if "http.status_code" in span.attributes: + status_code = span.attributes["http.status_code"] + data.result_code = str(status_code) + data.success = 200 <= status_code < 400 + else: # SpanKind.INTERNAL + data.type = "InProc" + data.success = True + for key in span.attributes: + # This removes redundant data from ApplicationInsights + if key.startswith("http."): + continue + data.properties[key] = span.attributes[key] + if span.links: + links = [] + for link in span.links: + operation_id = "{:032x}".format(link.context.trace_id) + span_id = "{:016x}".format(link.context.span_id) + links.append({"operation_Id": operation_id, "id": span_id}) + data.properties["_MS.links"] = json.dumps(links) + # TODO: tracestate, tags + return envelope - properties = {} - for label_tuple in metric_record.labels: - properties[label_tuple[0]] = label_tuple[1] - data = protocol.MetricData(metrics=[data_point], properties=properties) - envelope.data = protocol.Data(base_data=data, base_type="MetricData") - return envelope +def indicate_processed_by_metric_extractors(envelope): + name = "Requests" + if envelope.data.base_type == "RemoteDependencyData": + name = "Dependencies" + envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"] = \ + "(Name:'" + name + "',Ver:'1.1')" From 25b2fb2b7b421bcbea4e3fb9be028a6be34e15b4 Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 18:19:26 -0400 Subject: [PATCH 06/11] fix test --- .../azure_monitor/sdk/auto_collection/live_metrics/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/live_metrics/__init__.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/live_metrics/__init__.py index 9e41b00..a4850bb 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/live_metrics/__init__.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/live_metrics/__init__.py @@ -42,7 +42,7 @@ def __init__( col_type = AutoCollectionType.LIVE_METRICS self._performance_metrics = PerformanceMetrics(meter, labels, col_type) self._dependency_metrics = DependencyMetrics(meter, labels) - self._request_metrics = RequestMetrics(meter, labels, col_type) + self._request_metrics = RequestMetrics(meter, labels) self._manager = LiveMetricsManager( meter, instrumentation_key, span_processor ) From e89093bc94673b4072f28f733ef5831fbd25fb2b Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 18:26:05 -0400 Subject: [PATCH 07/11] lint --- .../azure_monitor/export/trace/__init__.py | 7 +++- .../sdk/auto_collection/dependency_metrics.py | 6 +-- .../auto_collection/performance_metrics.py | 10 ++--- .../sdk/auto_collection/request_metrics.py | 6 +-- .../sdk/auto_collection/utils.py | 3 +- azure_monitor/tests/trace/test_trace.py | 41 ++++++++++--------- 6 files changed, 38 insertions(+), 35 deletions(-) diff --git a/azure_monitor/src/azure_monitor/export/trace/__init__.py b/azure_monitor/src/azure_monitor/export/trace/__init__.py index d926e27..bc06989 100644 --- a/azure_monitor/src/azure_monitor/export/trace/__init__.py +++ 
b/azure_monitor/src/azure_monitor/export/trace/__init__.py @@ -26,6 +26,7 @@ class AzureMonitorSpanExporter(BaseExporter, SpanExporter): Args: options: :doc:`export.options` to allow configuration for the exporter """ + def __init__(self, **options): super().__init__(**options) self.add_telemetry_processor(indicate_processed_by_metric_extractors) @@ -162,9 +163,11 @@ def convert_span_to_envelope(span: Span) -> protocol.Envelope: # TODO: tracestate, tags return envelope + def indicate_processed_by_metric_extractors(envelope): name = "Requests" if envelope.data.base_type == "RemoteDependencyData": name = "Dependencies" - envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"] = \ - "(Name:'" + name + "',Ver:'1.1')" + envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"] = ( + "(Name:'" + name + "',Ver:'1.1')" + ) diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/dependency_metrics.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/dependency_metrics.py index 6229351..1afc829 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/dependency_metrics.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/dependency_metrics.py @@ -91,7 +91,7 @@ def __init__(self, meter: Meter, labels: Dict[str, str]): ) def _track_dependency_rate(self, observer: Observer) -> None: - """ Track Dependency rate + """Track Dependency rate Calculated by obtaining the number of outgoing requests made using the requests library within an elapsed time and dividing @@ -121,7 +121,7 @@ def _track_dependency_rate(self, observer: Observer) -> None: observer.observe(last_result, self._labels) def _track_dependency_duration(self, observer: Observer) -> None: - """ Track Dependency average duration + """Track Dependency average duration Calculated by getting the time it takes to make an outgoing request and dividing over the amount of outgoing requests over an elapsed time. @@ -149,7 +149,7 @@ def _track_dependency_duration(self, observer: Observer) -> None: observer.observe(last_average_duration, self._labels) def _track_failure_rate(self, observer: Observer) -> None: - """ Track Failed Dependency rate + """Track Failed Dependency rate Calculated by obtaining the number of failed outgoing requests made using the requests library within an elapsed time and dividing diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/performance_metrics.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/performance_metrics.py index 417e068..a1abcc2 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/performance_metrics.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/performance_metrics.py @@ -79,7 +79,7 @@ def __init__( ) def _track_cpu(self, observer: Observer) -> None: - """ Track CPU time + """Track CPU time Processor time is defined as a float representing the current system wide CPU utilization minus idle CPU time as a percentage. Idle CPU @@ -90,7 +90,7 @@ def _track_cpu(self, observer: Observer) -> None: observer.observe(100.0 - cpu_times_percent.idle, self._labels) def _track_memory(self, observer: Observer) -> None: - """ Track Memory + """Track Memory Available memory is defined as memory that can be given instantly to processes without the system going into swap. 
@@ -98,7 +98,7 @@ def _track_memory(self, observer: Observer) -> None: observer.observe(psutil.virtual_memory().available, self._labels) def _track_process_cpu(self, observer: Observer) -> None: - """ Track Process CPU time + """Track Process CPU time Returns a derived gauge for the CPU usage for the current process. Return values range from 0.0 to 100.0 inclusive. @@ -113,7 +113,7 @@ def _track_process_cpu(self, observer: Observer) -> None: logger.warning("Error handling get process cpu usage.") def _track_process_memory(self, observer: Observer) -> None: - """ Track Memory + """Track Memory Available memory is defined as memory that can be given instantly to processes without the system going into swap. @@ -124,7 +124,7 @@ def _track_process_memory(self, observer: Observer) -> None: logger.warning("Error handling get process private bytes.") def _track_commited_memory(self, observer: Observer) -> None: - """ Track Commited Memory + """Track Commited Memory Available commited memory is defined as total memory minus available memory. """ diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py index 4be1cc3..7a0b36d 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py @@ -117,7 +117,7 @@ def __init__( ) def _track_request_duration(self, observer: Observer) -> None: - """ Track Request execution time + """Track Request execution time Calculated by getting the time it takes to make an incoming request and dividing over the amount of incoming requests over an elapsed time. @@ -141,7 +141,7 @@ def _track_request_duration(self, observer: Observer) -> None: observer.observe(last_average_duration, self._labels) def _track_request_rate(self, observer: Observer) -> None: - """ Track Request execution rate + """Track Request execution rate Calculated by obtaining by getting the number of incoming requests made to an HTTPServer within an elapsed time and dividing that value @@ -171,7 +171,7 @@ def _track_request_rate(self, observer: Observer) -> None: observer.observe(last_rate, self._labels) def _track_request_failed_rate(self, observer: Observer) -> None: - """ Track Request failed execution rate + """Track Request failed execution rate Calculated by obtaining by getting the number of failed incoming requests made to an HTTPServer within an elapsed time and dividing that value diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/utils.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/utils.py index 8883b11..6ca330c 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/utils.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/utils.py @@ -4,8 +4,7 @@ class AutoCollectionType(Enum): - """Automatic collection of metrics type - """ + """Automatic collection of metrics type""" PERF_COUNTER = 0 LIVE_METRICS = 1 diff --git a/azure_monitor/tests/trace/test_trace.py b/azure_monitor/tests/trace/test_trace.py index 3329e22..bea8033 100644 --- a/azure_monitor/tests/trace/test_trace.py +++ b/azure_monitor/tests/trace/test_trace.py @@ -17,7 +17,7 @@ from azure_monitor.export import ExportResult from azure_monitor.export.trace import ( AzureMonitorSpanExporter, - indicate_processed_by_metric_extractors + indicate_processed_by_metric_extractors, ) from azure_monitor.options import ExporterOptions @@ -73,7 +73,7 @@ def test_constructor(self): storage_max_size=50, 
storage_maintenance_period=100, storage_retention_period=200, - proxies={"asd":"123"}, + proxies={"asd": "123"}, timeout=5.0, ) self.assertIsInstance(exporter.options, ExporterOptions) @@ -84,22 +84,15 @@ def test_constructor(self): self.assertEqual( exporter.storage.path, os.path.join(TEST_FOLDER, self.id()) ) + self.assertEqual(exporter.storage.max_size, 50) + self.assertEqual(exporter.storage.maintenance_period, 100) + self.assertEqual(exporter.storage.retention_period, 200) + self.assertEqual(exporter.options.proxies, {"asd": "123"}) + self.assertEqual(exporter.options.timeout, 5.0) self.assertEqual( - exporter.storage.max_size, 50 + exporter._telemetry_processors[0], + indicate_processed_by_metric_extractors, ) - self.assertEqual( - exporter.storage.maintenance_period, 100 - ) - self.assertEqual( - exporter.storage.retention_period, 200 - ) - self.assertEqual( - exporter.options.proxies, {"asd":"123"} - ) - self.assertEqual( - exporter.options.timeout, 5.0 - ) - self.assertEqual(exporter._telemetry_processors[0], indicate_processed_by_metric_extractors) def test_export_empty(self): exporter = self._exporter @@ -194,12 +187,20 @@ def test_indicate_processed_by_metric_extractors(self): envelope.data.base_type = "RemoteDependencyData" envelope.data.base_data.properties = {} indicate_processed_by_metric_extractors(envelope) - self.assertEqual(envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"], - "(Name:'Dependencies',Ver:'1.1')") + self.assertEqual( + envelope.data.base_data.properties[ + "_MS.ProcessedByMetricExtractors" + ], + "(Name:'Dependencies',Ver:'1.1')", + ) envelope.data.base_type = "RequestData" indicate_processed_by_metric_extractors(envelope) - self.assertEqual(envelope.data.base_data.properties["_MS.ProcessedByMetricExtractors"], - "(Name:'Requests',Ver:'1.1')") + self.assertEqual( + envelope.data.base_data.properties[ + "_MS.ProcessedByMetricExtractors" + ], + "(Name:'Requests',Ver:'1.1')", + ) def test_span_to_envelope_none(self): exporter = self._exporter From 3d65d1a8f4922a0862eb010339f278f88bc26535 Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 18:30:26 -0400 Subject: [PATCH 08/11] lint --- .../sdk/auto_collection/request_metrics.py | 4 +--- .../auto_collection/test_request_metrics.py | 24 +++++++------------ 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py index 7a0b36d..aba461c 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py @@ -82,9 +82,7 @@ class RequestMetrics: """ def __init__( - self, - meter: Meter, - labels: Dict[str, str], + self, meter: Meter, labels: Dict[str, str], ): self._meter = meter self._labels = labels diff --git a/azure_monitor/tests/auto_collection/test_request_metrics.py b/azure_monitor/tests/auto_collection/test_request_metrics.py index 33952ec..bccde83 100644 --- a/azure_monitor/tests/auto_collection/test_request_metrics.py +++ b/azure_monitor/tests/auto_collection/test_request_metrics.py @@ -32,8 +32,7 @@ def setUp(self): def test_constructor(self): mock_meter = mock.Mock() request_metrics_collector = request_metrics.RequestMetrics( - meter=mock_meter, - labels=self._test_labels, + meter=mock_meter, labels=self._test_labels, ) self.assertEqual(request_metrics_collector._meter, mock_meter) self.assertEqual(request_metrics_collector._labels, 
self._test_labels) @@ -65,8 +64,7 @@ def test_constructor(self): def test_track_request_duration(self): request_metrics_collector = request_metrics.RequestMetrics( - meter=self._meter, - labels=self._test_labels, + meter=mock_meter, labels=self._test_labels, ) request_metrics.requests_map["duration"] = 100 request_metrics.requests_map["count"] = 10 @@ -85,8 +83,7 @@ def test_track_request_duration(self): def test_track_request_duration_error(self): request_metrics_collector = request_metrics.RequestMetrics( - meter=self._meter, - labels=self._test_labels, + meter=mock_meter, labels=self._test_labels, ) request_metrics.requests_map["duration"] = 100 request_metrics.requests_map["count"] = 10 @@ -106,8 +103,7 @@ def test_track_request_duration_error(self): @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") def test_track_request_rate(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( - meter=self._meter, - labels=self._test_labels, + meter=mock_meter, labels=self._test_labels, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_time"] = 98 @@ -128,8 +124,7 @@ def test_track_request_rate(self, time_mock): def test_track_request_rate_time_none(self, time_mock): time_mock.time.return_value = 100 request_metrics_collector = request_metrics.RequestMetrics( - meter=self._meter, - labels=self._test_labels, + meter=mock_meter, labels=self._test_labels, ) request_metrics.requests_map["last_time"] = None obs = Observer( @@ -147,8 +142,7 @@ def test_track_request_rate_time_none(self, time_mock): @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") def test_track_request_rate_error(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( - meter=self._meter, - labels=self._test_labels, + meter=mock_meter, labels=self._test_labels, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_rate"] = 5.0 @@ -168,8 +162,7 @@ def test_track_request_rate_error(self, time_mock): @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") def test_track_request_rate_failed(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( - meter=self._meter, - labels=self._test_labels, + meter=mock_meter, labels=self._test_labels, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_failed_count"] = 5.0 @@ -190,8 +183,7 @@ def test_track_request_rate_failed(self, time_mock): @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") def test_track_request_rate_failed_error(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( - meter=self._meter, - labels=self._test_labels, + meter=mock_meter, labels=self._test_labels, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_rate"] = 5.0 From ea264c6a61b49f8603b9ddab62b20b95f5bdacc5 Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 18:34:25 -0400 Subject: [PATCH 09/11] Update test_request_metrics.py --- .../tests/auto_collection/test_request_metrics.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/azure_monitor/tests/auto_collection/test_request_metrics.py b/azure_monitor/tests/auto_collection/test_request_metrics.py index bccde83..62b1209 100644 --- a/azure_monitor/tests/auto_collection/test_request_metrics.py +++ b/azure_monitor/tests/auto_collection/test_request_metrics.py @@ -64,7 +64,7 @@ def test_constructor(self): def test_track_request_duration(self): request_metrics_collector = 
request_metrics.RequestMetrics( - meter=mock_meter, labels=self._test_labels, + meter=self._meter, labels=self._test_labels, ) request_metrics.requests_map["duration"] = 100 request_metrics.requests_map["count"] = 10 @@ -83,7 +83,7 @@ def test_track_request_duration(self): def test_track_request_duration_error(self): request_metrics_collector = request_metrics.RequestMetrics( - meter=mock_meter, labels=self._test_labels, + meter=self._meter, labels=self._test_labels, ) request_metrics.requests_map["duration"] = 100 request_metrics.requests_map["count"] = 10 @@ -103,7 +103,7 @@ def test_track_request_duration_error(self): @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") def test_track_request_rate(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( - meter=mock_meter, labels=self._test_labels, + meter=self._meter, labels=self._test_labels, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_time"] = 98 @@ -124,7 +124,7 @@ def test_track_request_rate(self, time_mock): def test_track_request_rate_time_none(self, time_mock): time_mock.time.return_value = 100 request_metrics_collector = request_metrics.RequestMetrics( - meter=mock_meter, labels=self._test_labels, + meter=self._meter, labels=self._test_labels, ) request_metrics.requests_map["last_time"] = None obs = Observer( @@ -142,7 +142,7 @@ def test_track_request_rate_time_none(self, time_mock): @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") def test_track_request_rate_error(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( - meter=mock_meter, labels=self._test_labels, + meter=self._meter, labels=self._test_labels, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_rate"] = 5.0 @@ -162,7 +162,7 @@ def test_track_request_rate_error(self, time_mock): @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") def test_track_request_rate_failed(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( - meter=mock_meter, labels=self._test_labels, + meter=self._meter, labels=self._test_labels, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_failed_count"] = 5.0 @@ -183,7 +183,7 @@ def test_track_request_rate_failed(self, time_mock): @mock.patch("azure_monitor.sdk.auto_collection.request_metrics.time") def test_track_request_rate_failed_error(self, time_mock): request_metrics_collector = request_metrics.RequestMetrics( - meter=mock_meter, labels=self._test_labels, + meter=self._meter, labels=self._test_labels, ) time_mock.time.return_value = 100 request_metrics.requests_map["last_rate"] = 5.0 From 701a4d2336b9494c1af9cec1b57a535a46dc8193 Mon Sep 17 00:00:00 2001 From: Leighton Date: Thu, 1 Oct 2020 18:46:46 -0400 Subject: [PATCH 10/11] Update request_metrics.py --- .../src/azure_monitor/sdk/auto_collection/request_metrics.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py b/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py index aba461c..71bca8f 100644 --- a/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py +++ b/azure_monitor/src/azure_monitor/sdk/auto_collection/request_metrics.py @@ -9,8 +9,6 @@ from opentelemetry.metrics import Meter, Observer from opentelemetry.sdk.metrics import UpDownSumObserver -from azure_monitor.sdk.auto_collection.utils import AutoCollectionType - _requests_lock = threading.Lock() logger = logging.getLogger(__name__) 
requests_map = dict() From 1f019dfb5bd3d00789a6afab204822d06fd53098 Mon Sep 17 00:00:00 2001 From: Leighton Date: Fri, 2 Oct 2020 12:37:42 -0400 Subject: [PATCH 11/11] changelog --- azure_monitor/CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/azure_monitor/CHANGELOG.md b/azure_monitor/CHANGELOG.md index c81c6c8..8845db2 100644 --- a/azure_monitor/CHANGELOG.md +++ b/azure_monitor/CHANGELOG.md @@ -2,6 +2,9 @@ ## Unreleased +- Remove request metrics from auto-collection + ([#124](https://github.com/microsoft/opentelemetry-azure-monitor-python/pull/124)) + ## 0.5b.0 Released 2020-09-24
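
Taken together with PATCH 02/11, auto collection now covers performance counters only. Below is a condensed sketch of the resulting setup, based on `examples/metrics/auto_collector.py` as it stands after this series; the `AutoCollection` import path is inferred from the package layout shown above, and the connection string is a placeholder.

```python
# Condensed from examples/metrics/auto_collector.py after this series:
# AutoCollection registers performance counters (CPU, memory, process
# counters); request metrics are no longer part of this collection path.
from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider

from azure_monitor import AzureMonitorMetricsExporter
from azure_monitor.sdk.auto_collection import AutoCollection

metrics.set_meter_provider(MeterProvider())
meter = metrics.get_meter(__name__)

exporter = AzureMonitorMetricsExporter(
    connection_string="InstrumentationKey=<your-instrumentation-key>"  # placeholder
)

testing_label_set = {"environment": "testing"}

# Automatically collect performance counters
auto_collection = AutoCollection(meter=meter, labels=testing_label_set)

# Export the collected metrics to Azure Monitor every 2 seconds
metrics.get_meter_provider().start_pipeline(meter, exporter, 2)

input("Press any key to exit...")  # keep the process alive so the pipeline keeps exporting
```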