diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a17c4f0ba..aaf317cc81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,99 @@ # Changelog +## 1.45.0 + +This is the final 1.x release for the foreseeable future. Development will continue on the 2.x release line. The first 2.x version will be available in the next few weeks. + +### Various fixes & improvements + +- Allow to upsert monitors (#2929) by @sentrivana + + It's now possible to provide `monitor_config` to the `monitor` decorator/context manager directly: + + ```python + from sentry_sdk.crons import monitor + + # All keys except `schedule` are optional + monitor_config = { + "schedule": {"type": "crontab", "value": "0 0 * * *"}, + "timezone": "Europe/Vienna", + "checkin_margin": 10, + "max_runtime": 10, + "failure_issue_threshold": 5, + "recovery_threshold": 5, + } + + @monitor(monitor_slug='', monitor_config=monitor_config) + def tell_the_world(): + print('My scheduled task...') + ``` + + Check out [the cron docs](https://docs.sentry.io/platforms/python/crons/) for details. + +- Add Django `signals_denylist` to filter signals that are attached to by `signals_spans` (#2758) by @lieryan + + If you want to exclude some Django signals from performance tracking, you can use the new `signals_denylist` Django option: + + ```python + import django.db.models.signals + import sentry_sdk + + sentry_sdk.init( + ... + integrations=[ + DjangoIntegration( + ... + signals_denylist=[ + django.db.models.signals.pre_init, + django.db.models.signals.post_init, + ], + ), + ], + ) + ``` + +- `increment` for metrics (#2588) by @mitsuhiko + + `increment` and `incr` are equivalent, so you can pick whichever you like more. + +- Add `value`, `unit` to `before_emit_metric` (#2958) by @sentrivana + + If you add a custom `before_emit_metric`, it'll now accept 4 arguments (the `key`, `value`, `unit` and `tags`) instead of just `key` and `tags`. 
+ + ```python + def before_emit(key, value, unit, tags): + if key == "removed-metric": + return False + tags["extra"] = "foo" + del tags["release"] + return True + + sentry_sdk.init( + ... + _experiments={ + "before_emit_metric": before_emit, + } + ) + ``` + +- Remove experimental metric summary options (#2957) by @sentrivana + + The `_experiments` options `metrics_summary_sample_rate` and `should_summarize_metric` have been removed. + +- New normalization rules for metric keys, names, units, tags (#2946) by @sentrivana +- Change `data_category` from `statsd` to `metric_bucket` (#2954) by @cleptric +- Accessing `__mro__` might throw a `ValueError` (#2952) by @sentrivana +- Suppress prompt spawned by subprocess when using `pythonw` (#2936) by @collinbanko +- Handle `None` in GraphQL query #2715 (#2762) by @czyber +- Do not send "quiet" Sanic exceptions to Sentry (#2821) by @hamedsh +- Implement `metric_bucket` rate limits (#2933) by @cleptric +- Fix type hints for `monitor` decorator (#2944) by @szokeasaurusrex +- Remove deprecated `typing` imports in crons (#2945) by @szokeasaurusrex +- Make `monitor_config` a `TypedDict` (#2931) by @sentrivana +- Add `devenv-requirements.txt` and update env setup instructions (#2761) by @arr-ee +- Bump `types-protobuf` from `4.24.0.20240311` to `4.24.0.20240408` (#2941) by @dependabot +- Disable Codecov check run annotations (#2537) by @eliatcodecov + ## 1.44.1 ### Various fixes & improvements diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cf972cfd6c..05b642c502 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,7 +8,6 @@ This file outlines the process to contribute to the SDK itself. For contributing Please search the [issue tracker](https://github.com/getsentry/sentry-python/issues) before creating a new issue (a problem or an improvement request). Please also ask in our [Sentry Community on Discord](https://discord.com/invite/Ww9hbqr) before submitting a new issue. 
There are a ton of great people in our Discord community ready to help you! - ## Submitting Changes - Fork the `sentry-python` repo and prepare your changes. @@ -64,7 +63,7 @@ This will make sure that your commits will have the correct coding style. ```bash cd sentry-python -pip install -r linter-requirements.txt +pip install -r devenv-requirements.txt pip install pre-commit @@ -75,12 +74,8 @@ That's it. You should be ready to make changes, run tests, and make commits! If ## Running Tests -To run the tests, first setup your development environment according to the instructions above. Then, install the required packages for running tests with the following command: -```bash -pip install -r test-requirements.txt -``` +You can run all tests with the following command: -Once the requirements are installed, you can run all tests with the following command: ```bash pytest tests/ ``` diff --git a/codecov.yml b/codecov.yml index 93a5b687e4..6e4467b675 100644 --- a/codecov.yml +++ b/codecov.yml @@ -9,3 +9,5 @@ coverage: ignore: - "tests" - "sentry_sdk/_types.py" +github_checks: + annotations: false \ No newline at end of file diff --git a/devenv-requirements.txt b/devenv-requirements.txt new file mode 100644 index 0000000000..2b7abae3c2 --- /dev/null +++ b/devenv-requirements.txt @@ -0,0 +1,5 @@ +-r linter-requirements.txt +-r test-requirements.txt +mockupdb # required by `pymongo` tests that are enabled by `pymongo` from linter requirements +pytest<7.0.0 # https://github.com/pytest-dev/pytest/issues/9621; see tox.ini +pytest-asyncio<=0.21.1 # https://github.com/pytest-dev/pytest-asyncio/issues/706 diff --git a/docs/conf.py b/docs/conf.py index e617c75840..5383a64224 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -30,7 +30,7 @@ copyright = "2019-{}, Sentry Team and Contributors".format(datetime.now().year) author = "Sentry Team and Contributors" -release = "1.44.1" +release = "1.45.0" version = ".".join(release.split(".")[:2]) # The short X.Y version. 
diff --git a/linter-requirements.txt b/linter-requirements.txt index c390f5fe70..e86ffd506b 100644 --- a/linter-requirements.txt +++ b/linter-requirements.txt @@ -2,7 +2,7 @@ mypy black flake8==5.0.4 # flake8 depends on pyflakes>=3.0.0 and this dropped support for Python 2 "# type:" comments types-certifi -types-protobuf==4.24.0.20240311 # newer raises an error on mypy sentry_sdk +types-protobuf==4.24.0.20240408 # newer raises an error on mypy sentry_sdk types-redis types-setuptools pymongo # There is no separate types module. diff --git a/sentry_sdk/_types.py b/sentry_sdk/_types.py index 49bffb3416..368db17138 100644 --- a/sentry_sdk/_types.py +++ b/sentry_sdk/_types.py @@ -113,7 +113,7 @@ "session", "internal", "profile", - "statsd", + "metric_bucket", "monitor", ] SessionStatus = Literal["ok", "exited", "crashed", "abnormal"] @@ -178,3 +178,37 @@ BucketKey = Tuple[MetricType, str, MeasurementUnit, MetricTagsInternal] MetricMetaKey = Tuple[MetricType, str, MeasurementUnit] + + MonitorConfigScheduleType = Literal["crontab", "interval"] + MonitorConfigScheduleUnit = Literal[ + "year", + "month", + "week", + "day", + "hour", + "minute", + "second", # not supported in Sentry and will result in a warning + ] + + MonitorConfigSchedule = TypedDict( + "MonitorConfigSchedule", + { + "type": MonitorConfigScheduleType, + "value": Union[int, str], + "unit": MonitorConfigScheduleUnit, + }, + total=False, + ) + + MonitorConfig = TypedDict( + "MonitorConfig", + { + "schedule": MonitorConfigSchedule, + "timezone": str, + "checkin_margin": int, + "max_runtime": int, + "failure_issue_threshold": int, + "recovery_threshold": int, + }, + total=False, + ) diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 047cb1384c..1cf37211e1 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -24,10 +24,12 @@ Event, EventProcessor, Hint, + MeasurementUnit, ProfilerMode, TracesSampler, TransactionProcessor, MetricTags, + MetricValue, ) # Experiments are feature flags to 
enable and disable certain unstable SDK @@ -47,9 +49,9 @@ "transport_zlib_compression_level": Optional[int], "transport_num_pools": Optional[int], "enable_metrics": Optional[bool], - "metrics_summary_sample_rate": Optional[float], - "should_summarize_metric": Optional[Callable[[str, MetricTags], bool]], - "before_emit_metric": Optional[Callable[[str, MetricTags], bool]], + "before_emit_metric": Optional[ + Callable[[str, MetricValue, MeasurementUnit, MetricTags], bool] + ], "metric_code_locations": Optional[bool], }, total=False, @@ -333,4 +335,4 @@ def _get_default_options(): del _get_default_options -VERSION = "1.44.1" +VERSION = "1.45.0" diff --git a/sentry_sdk/crons/_decorator.py b/sentry_sdk/crons/_decorator.py index 5a15000a48..2d0612f681 100644 --- a/sentry_sdk/crons/_decorator.py +++ b/sentry_sdk/crons/_decorator.py @@ -4,35 +4,58 @@ from sentry_sdk._types import TYPE_CHECKING if TYPE_CHECKING: - from typing import ( - Awaitable, - Callable, - ParamSpec, - TypeVar, - Union, - ) + from collections.abc import Awaitable, Callable + from typing import Any, cast, overload, ParamSpec, TypeVar, Union P = ParamSpec("P") R = TypeVar("R") class MonitorMixin: - def __call__(self, fn): - # type: (Callable[P, R]) -> Callable[P, Union[R, Awaitable[R]]] + if TYPE_CHECKING: + + @overload + def __call__(self, fn): + # type: (Callable[P, Awaitable[Any]]) -> Callable[P, Awaitable[Any]] + # Unfortunately, mypy does not give us any reliable way to type check the + # return value of an Awaitable (i.e. async function) for this overload, + # since calling iscoroutinefunction narrows the type to Callable[P, Awaitable[Any]]. + ... + + @overload + def __call__(self, fn): + # type: (Callable[P, R]) -> Callable[P, R] + ... + + def __call__( + self, + fn, # type: Union[Callable[P, R], Callable[P, Awaitable[Any]]] + ): + # type: (...) 
-> Union[Callable[P, R], Callable[P, Awaitable[Any]]] if iscoroutinefunction(fn): - - @wraps(fn) - async def inner(*args: "P.args", **kwargs: "P.kwargs"): - # type: (...) -> R - with self: # type: ignore[attr-defined] - return await fn(*args, **kwargs) + return self._async_wrapper(fn) else: + if TYPE_CHECKING: + fn = cast("Callable[P, R]", fn) + return self._sync_wrapper(fn) + + def _async_wrapper(self, fn): + # type: (Callable[P, Awaitable[Any]]) -> Callable[P, Awaitable[Any]] + @wraps(fn) + async def inner(*args: "P.args", **kwargs: "P.kwargs"): + # type: (...) -> R + with self: # type: ignore[attr-defined] + return await fn(*args, **kwargs) + + return inner - @wraps(fn) - def inner(*args: "P.args", **kwargs: "P.kwargs"): - # type: (...) -> R - with self: # type: ignore[attr-defined] - return fn(*args, **kwargs) + def _sync_wrapper(self, fn): + # type: (Callable[P, R]) -> Callable[P, R] + @wraps(fn) + def inner(*args: "P.args", **kwargs: "P.kwargs"): + # type: (...) -> R + with self: # type: ignore[attr-defined] + return fn(*args, **kwargs) return inner diff --git a/sentry_sdk/crons/api.py b/sentry_sdk/crons/api.py index 92d113a924..1a95583301 100644 --- a/sentry_sdk/crons/api.py +++ b/sentry_sdk/crons/api.py @@ -5,18 +5,18 @@ if TYPE_CHECKING: - from typing import Any, Dict, Optional - from sentry_sdk._types import Event + from typing import Optional + from sentry_sdk._types import Event, MonitorConfig def _create_check_in_event( - monitor_slug=None, - check_in_id=None, - status=None, - duration_s=None, - monitor_config=None, + monitor_slug=None, # type: Optional[str] + check_in_id=None, # type: Optional[str] + status=None, # type: Optional[str] + duration_s=None, # type: Optional[float] + monitor_config=None, # type: Optional[MonitorConfig] ): - # type: (Optional[str], Optional[str], Optional[str], Optional[float], Optional[Dict[str, Any]]) -> Event + # type: (...) 
-> Event options = Hub.current.client.options if Hub.current.client else {} check_in_id = check_in_id or uuid.uuid4().hex # type: str @@ -37,13 +37,13 @@ def _create_check_in_event( def capture_checkin( - monitor_slug=None, - check_in_id=None, - status=None, - duration=None, - monitor_config=None, + monitor_slug=None, # type: Optional[str] + check_in_id=None, # type: Optional[str] + status=None, # type: Optional[str] + duration=None, # type: Optional[float] + monitor_config=None, # type: Optional[MonitorConfig] ): - # type: (Optional[str], Optional[str], Optional[str], Optional[float], Optional[Dict[str, Any]]) -> str + # type: (...) -> str check_in_event = _create_check_in_event( monitor_slug=monitor_slug, check_in_id=check_in_id, diff --git a/sentry_sdk/crons/decorator.py b/sentry_sdk/crons/decorator.py index 38653ca161..6c5f747b97 100644 --- a/sentry_sdk/crons/decorator.py +++ b/sentry_sdk/crons/decorator.py @@ -7,6 +7,7 @@ if TYPE_CHECKING: from typing import Optional, Type from types import TracebackType + from sentry_sdk._types import MonitorConfig if PY2: from sentry_sdk.crons._decorator_py2 import MonitorMixin @@ -47,15 +48,18 @@ def test(arg): ``` """ - def __init__(self, monitor_slug=None): - # type: (Optional[str]) -> None + def __init__(self, monitor_slug=None, monitor_config=None): + # type: (Optional[str], Optional[MonitorConfig]) -> None self.monitor_slug = monitor_slug + self.monitor_config = monitor_config def __enter__(self): # type: () -> None self.start_timestamp = now() self.check_in_id = capture_checkin( - monitor_slug=self.monitor_slug, status=MonitorStatus.IN_PROGRESS + monitor_slug=self.monitor_slug, + status=MonitorStatus.IN_PROGRESS, + monitor_config=self.monitor_config, ) def __exit__(self, exc_type, exc_value, traceback): @@ -72,4 +76,5 @@ def __exit__(self, exc_type, exc_value, traceback): check_in_id=self.check_in_id, status=status, duration=duration_s, + monitor_config=self.monitor_config, ) diff --git a/sentry_sdk/envelope.py 
b/sentry_sdk/envelope.py index 8f89bda238..fb214a45f4 100644 --- a/sentry_sdk/envelope.py +++ b/sentry_sdk/envelope.py @@ -261,7 +261,7 @@ def data_category(self): elif ty == "profile": return "profile" elif ty == "statsd": - return "statsd" + return "metric_bucket" elif ty == "check_in": return "monitor" else: diff --git a/sentry_sdk/integrations/celery.py b/sentry_sdk/integrations/celery.py index f2e1aff48a..984197316f 100644 --- a/sentry_sdk/integrations/celery.py +++ b/sentry_sdk/integrations/celery.py @@ -3,6 +3,11 @@ import sys import time +try: + from typing import cast +except ImportError: + cast = lambda _, o: o + from sentry_sdk.api import continue_trace from sentry_sdk.consts import OP from sentry_sdk._compat import reraise @@ -31,7 +36,15 @@ from typing import Union from sentry_sdk.tracing import Span - from sentry_sdk._types import EventProcessor, Event, Hint, ExcInfo + from sentry_sdk._types import ( + EventProcessor, + Event, + Hint, + ExcInfo, + MonitorConfig, + MonitorConfigScheduleType, + MonitorConfigScheduleUnit, + ) F = TypeVar("F", bound=Callable[..., Any]) @@ -416,7 +429,7 @@ def _get_headers(task): def _get_humanized_interval(seconds): - # type: (float) -> Tuple[int, str] + # type: (float) -> Tuple[int, MonitorConfigScheduleUnit] TIME_UNITS = ( # noqa: N806 ("day", 60 * 60 * 24.0), ("hour", 60 * 60.0), @@ -427,17 +440,17 @@ def _get_humanized_interval(seconds): for unit, divider in TIME_UNITS: if seconds >= divider: interval = int(seconds / divider) - return (interval, unit) + return (interval, cast("MonitorConfigScheduleUnit", unit)) return (int(seconds), "second") def _get_monitor_config(celery_schedule, app, monitor_name): - # type: (Any, Celery, str) -> Dict[str, Any] - monitor_config = {} # type: Dict[str, Any] - schedule_type = None # type: Optional[str] + # type: (Any, Celery, str) -> MonitorConfig + monitor_config = {} # type: MonitorConfig + schedule_type = None # type: Optional[MonitorConfigScheduleType] schedule_value = None # 
type: Optional[Union[str, int]] - schedule_unit = None # type: Optional[str] + schedule_unit = None # type: Optional[MonitorConfigScheduleUnit] if isinstance(celery_schedule, crontab): schedule_type = "crontab" diff --git a/sentry_sdk/integrations/django/__init__.py b/sentry_sdk/integrations/django/__init__.py index 98834a4693..a38674f09d 100644 --- a/sentry_sdk/integrations/django/__init__.py +++ b/sentry_sdk/integrations/django/__init__.py @@ -114,6 +114,7 @@ class DjangoIntegration(Integration): middleware_spans = None signals_spans = None cache_spans = None + signals_denylist = [] # type: list[signals.Signal] def __init__( self, @@ -121,8 +122,9 @@ def __init__( middleware_spans=True, signals_spans=True, cache_spans=False, + signals_denylist=None, ): - # type: (str, bool, bool, bool) -> None + # type: (str, bool, bool, bool, Optional[list[signals.Signal]]) -> None if transaction_style not in TRANSACTION_STYLE_VALUES: raise ValueError( "Invalid value for transaction_style: %s (must be in %s)" @@ -132,6 +134,7 @@ def __init__( self.middleware_spans = middleware_spans self.signals_spans = signals_spans self.cache_spans = cache_spans + self.signals_denylist = signals_denylist or [] @staticmethod def setup_once(): diff --git a/sentry_sdk/integrations/django/signals_handlers.py b/sentry_sdk/integrations/django/signals_handlers.py index 097a56c8aa..3d1aadab1f 100644 --- a/sentry_sdk/integrations/django/signals_handlers.py +++ b/sentry_sdk/integrations/django/signals_handlers.py @@ -78,7 +78,11 @@ def wrapper(*args, **kwargs): return wrapper integration = hub.get_integration(DjangoIntegration) - if integration and integration.signals_spans: + if ( + integration + and integration.signals_spans + and self not in integration.signals_denylist + ): for idx, receiver in enumerate(sync_receivers): sync_receivers[idx] = sentry_sync_receiver_wrapper(receiver) diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py index 53d3cb6c07..7e0c690da0 100644 --- 
a/sentry_sdk/integrations/sanic.py +++ b/sentry_sdk/integrations/sanic.py @@ -342,6 +342,8 @@ def _capture_exception(exception): client_options=client.options, mechanism={"type": "sanic", "handled": False}, ) + if hint and hasattr(hint["exc_info"][0], "quiet") and hint["exc_info"][0].quiet: + return hub.capture_event(event, hint=hint) diff --git a/sentry_sdk/integrations/strawberry.py b/sentry_sdk/integrations/strawberry.py index 3d450e0692..5bc4184bee 100644 --- a/sentry_sdk/integrations/strawberry.py +++ b/sentry_sdk/integrations/strawberry.py @@ -145,6 +145,9 @@ def on_operation(self): operation_type = "query" op = OP.GRAPHQL_QUERY + if self.execution_context.query is None: + self.execution_context.query = "" + if self.execution_context.query.strip().startswith("mutation"): operation_type = "mutation" op = OP.GRAPHQL_MUTATION diff --git a/sentry_sdk/metrics.py b/sentry_sdk/metrics.py index b59cf033ec..1e4f5a532e 100644 --- a/sentry_sdk/metrics.py +++ b/sentry_sdk/metrics.py @@ -54,8 +54,6 @@ _in_metrics = ContextVar("in_metrics", default=False) -_sanitize_key = partial(re.compile(r"[^a-zA-Z0-9_/.-]+").sub, "_") -_sanitize_value = partial(re.compile(r"[^\w\d\s_:/@\.{}\[\]$-]+", re.UNICODE).sub, "") _set = set # set is shadowed below GOOD_TRANSACTION_SOURCES = frozenset( @@ -67,6 +65,32 @@ ] ) +_sanitize_unit = partial(re.compile(r"[^a-zA-Z0-9_]+").sub, "") +_sanitize_metric_key = partial(re.compile(r"[^a-zA-Z0-9_\-.]+").sub, "_") +_sanitize_tag_key = partial(re.compile(r"[^a-zA-Z0-9_\-.\/]+").sub, "") +_TAG_VALUE_SANITIZATION_TABLE = { + "\n": "\\n", + "\r": "\\r", + "\t": "\\t", + "\\": "\\\\", + "|": "\\u{7c}", + ",": "\\u{2c}", +} + + +def _sanitize_tag_value(value): + # type: (str) -> str + return "".join( + [ + ( + _TAG_VALUE_SANITIZATION_TABLE[char] + if char in _TAG_VALUE_SANITIZATION_TABLE + else char + ) + for char in value + ] + ) + def get_code_location(stacklevel): # type: (int) -> Optional[Dict[str, Any]] @@ -269,7 +293,8 @@ def 
_encode_metrics(flushable_buckets): for timestamp, buckets in flushable_buckets: for bucket_key, metric in iteritems(buckets): metric_type, metric_name, metric_unit, metric_tags = bucket_key - metric_name = _sanitize_key(metric_name) + metric_name = _sanitize_metric_key(metric_name) + metric_unit = _sanitize_unit(metric_unit) _write(metric_name.encode("utf-8")) _write(b"@") _write(metric_unit.encode("utf-8")) @@ -285,7 +310,7 @@ def _encode_metrics(flushable_buckets): _write(b"|#") first = True for tag_key, tag_value in metric_tags: - tag_key = _sanitize_key(tag_key) + tag_key = _sanitize_tag_key(tag_key) if not tag_key: continue if first: @@ -294,7 +319,7 @@ def _encode_metrics(flushable_buckets): _write(b",") _write(tag_key.encode("utf-8")) _write(b":") - _write(_sanitize_value(tag_value).encode("utf-8")) + _write(_sanitize_tag_value(tag_value).encode("utf-8")) _write(b"|T") _write(str(timestamp).encode("ascii")) @@ -309,7 +334,9 @@ def _encode_locations(timestamp, code_locations): for key, loc in code_locations: metric_type, name, unit = key - mri = "{}:{}@{}".format(metric_type, _sanitize_key(name), unit) + mri = "{}:{}@{}".format( + metric_type, _sanitize_metric_key(name), _sanitize_unit(unit) + ) loc["type"] = "location" mapping.setdefault(mri, []).append(loc) @@ -557,6 +584,8 @@ def add( # Given the new weight we consider whether we want to force flush. self._consider_force_flush() + # For sets, we only record that a value has been added to the set but not which one. 
+ # See develop docs: https://develop.sentry.dev/sdk/metrics/#sets if local_aggregator is not None: local_value = float(added if ty == "s" else value) local_aggregator.add(ty, key, local_value, unit, serialized_tags) @@ -701,15 +730,13 @@ def _get_aggregator(): ) -def _get_aggregator_and_update_tags(key, tags): - # type: (str, Optional[MetricTags]) -> Tuple[Optional[MetricsAggregator], Optional[LocalAggregator], Optional[MetricTags]] +def _get_aggregator_and_update_tags(key, value, unit, tags): + # type: (str, Optional[MetricValue], MeasurementUnit, Optional[MetricTags]) -> Tuple[Optional[MetricsAggregator], Optional[LocalAggregator], Optional[MetricTags]] hub = sentry_sdk.Hub.current client = hub.client if client is None or client.metrics_aggregator is None: return None, None, tags - experiments = client.options.get("_experiments", {}) - updated_tags = dict(tags or ()) # type: Dict[str, MetricTagValue] updated_tags.setdefault("release", client.options["release"]) updated_tags.setdefault("environment", client.options["environment"]) @@ -725,31 +752,20 @@ def _get_aggregator_and_update_tags(key, tags): if transaction_name: updated_tags.setdefault("transaction", transaction_name) if scope._span is not None: - sample_rate = experiments.get("metrics_summary_sample_rate") - # We default the sample rate of metrics summaries to 1.0 only when the sample rate is `None` since we - # want to honor the user's decision if they pass a valid float. 
- if sample_rate is None: - sample_rate = 1.0 - should_summarize_metric_callback = experiments.get( - "should_summarize_metric" - ) - if random.random() < sample_rate and ( - should_summarize_metric_callback is None - or should_summarize_metric_callback(key, updated_tags) - ): - local_aggregator = scope._span._get_local_aggregator() + local_aggregator = scope._span._get_local_aggregator() + experiments = client.options.get("_experiments", {}) before_emit_callback = experiments.get("before_emit_metric") if before_emit_callback is not None: with recursion_protection() as in_metrics: if not in_metrics: - if not before_emit_callback(key, updated_tags): + if not before_emit_callback(key, value, unit, updated_tags): return None, None, updated_tags return client.metrics_aggregator, local_aggregator, updated_tags -def incr( +def increment( key, # type: str value=1.0, # type: float unit="none", # type: MeasurementUnit @@ -759,13 +775,19 @@ def incr( ): # type: (...) -> None """Increments a counter.""" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(key, tags) + aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( + key, value, unit, tags + ) if aggregator is not None: aggregator.add( "c", key, value, unit, tags, timestamp, local_aggregator, stacklevel ) +# alias as incr is relatively common in python +incr = increment + + class _Timing(object): def __init__( self, @@ -816,7 +838,10 @@ def __exit__(self, exc_type, exc_value, tb): # type: (Any, Any, Any) -> None assert self._span, "did not enter" aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( - self.key, self.tags + self.key, + self.value, + self.unit, + self.tags, ) if aggregator is not None: elapsed = TIMING_FUNCTIONS[self.unit]() - self.entered # type: ignore @@ -871,7 +896,9 @@ def timing( - it can be used as a decorator """ if value is not None: - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(key, tags) + aggregator, local_aggregator, 
tags = _get_aggregator_and_update_tags( + key, value, unit, tags + ) if aggregator is not None: aggregator.add( "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel @@ -889,7 +916,9 @@ def distribution( ): # type: (...) -> None """Emits a distribution.""" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(key, tags) + aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( + key, value, unit, tags + ) if aggregator is not None: aggregator.add( "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel @@ -906,7 +935,9 @@ def set( ): # type: (...) -> None """Emits a set.""" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(key, tags) + aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( + key, value, unit, tags + ) if aggregator is not None: aggregator.add( "s", key, value, unit, tags, timestamp, local_aggregator, stacklevel @@ -923,7 +954,9 @@ def gauge( ): # type: (...) -> None """Emits a gauge.""" - aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(key, tags) + aggregator, local_aggregator, tags = _get_aggregator_and_update_tags( + key, value, unit, tags + ) if aggregator is not None: aggregator.add( "g", key, value, unit, tags, timestamp, local_aggregator, stacklevel diff --git a/sentry_sdk/profiler.py b/sentry_sdk/profiler.py index 4fa3e481ae..da5a4a8228 100644 --- a/sentry_sdk/profiler.py +++ b/sentry_sdk/profiler.py @@ -347,7 +347,7 @@ def get_frame_name(frame): for cls in frame.f_locals["self"].__class__.__mro__: if name in cls.__dict__: return "{}.{}".format(cls.__name__, name) - except AttributeError: + except (AttributeError, ValueError): pass # if it was a class method, (decorated with `@classmethod`) @@ -363,7 +363,7 @@ def get_frame_name(frame): for cls in frame.f_locals["cls"].__mro__: if name in cls.__dict__: return "{}.{}".format(cls.__name__, name) - except AttributeError: + except (AttributeError, ValueError): pass # nothing we can 
do if it is a staticmethod (decorated with @staticmethod) diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py index 9ea9cd0c98..d2fc734f7c 100644 --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -144,10 +144,22 @@ def _parse_rate_limits(header, now=None): for limit in header.split(","): try: - retry_after, categories, _ = limit.strip().split(":", 2) + parameters = limit.strip().split(":") + retry_after, categories = parameters[:2] + retry_after = now + timedelta(seconds=int(retry_after)) for category in categories and categories.split(";") or (None,): - yield category, retry_after + if category == "metric_bucket": + try: + namespaces = parameters[4].split(";") + except IndexError: + namespaces = [] + + if not namespaces or "custom" in namespaces: + yield category, retry_after + + else: + yield category, retry_after except (LookupError, ValueError): continue @@ -210,6 +222,7 @@ def record_lost_event( # quantity of 0 is actually 1 as we do not want to count # empty attachments as actually empty. 
quantity = len(item.get_bytes()) or 1 + elif data_category is None: raise TypeError("data category not provided") @@ -336,7 +349,14 @@ def _check_disabled(self, category): # type: (str) -> bool def _disabled(bucket): # type: (Any) -> bool + + # The envelope item type used for metrics is statsd + # whereas the rate limit category is metric_bucket + if bucket == "statsd": + bucket = "metric_bucket" + ts = self._disabled_until.get(bucket) + return ts is not None and ts > datetime_utcnow() return _disabled(category) or _disabled(None) @@ -402,7 +422,7 @@ def _send_envelope( new_items = [] for item in envelope.items: if self._check_disabled(item.data_category): - if item.data_category in ("transaction", "error", "default"): + if item.data_category in ("transaction", "error", "default", "statsd"): self.on_dropped_event("self_rate_limits") self.record_lost_event("ratelimit_backoff", item=item) else: diff --git a/sentry_sdk/utils.py b/sentry_sdk/utils.py index a64b4b4d98..efacd6161b 100644 --- a/sentry_sdk/utils.py +++ b/sentry_sdk/utils.py @@ -106,9 +106,16 @@ def get_git_revision(): # type: () -> Optional[str] try: with open(os.path.devnull, "w+") as null: + # prevent command prompt windows from popping up on windows + startupinfo = None + if sys.platform == "win32" or sys.platform == "cygwin": + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + revision = ( subprocess.Popen( ["git", "rev-parse", "HEAD"], + startupinfo=startupinfo, stdout=subprocess.PIPE, stderr=null, stdin=null, diff --git a/setup.py b/setup.py index 4a38adf0a5..14da2fc74c 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ def get_file_text(file_name): setup( name="sentry-sdk", - version="1.44.1", + version="1.45.0", author="Sentry Team and Contributors", author_email="hello@sentry.io", url="https://github.com/getsentry/sentry-python", diff --git a/tests/crons/test_crons.py b/tests/crons/test_crons.py index 0b31494acf..1f50a33751 100644 --- 
a/tests/crons/test_crons.py +++ b/tests/crons/test_crons.py @@ -33,6 +33,22 @@ def _break_world_contextmanager(name): return "Hello, {}".format(name) +@sentry_sdk.monitor(monitor_slug="ghi789", monitor_config=None) +def _no_monitor_config(): + return + + +@sentry_sdk.monitor( + monitor_slug="ghi789", + monitor_config={ + "schedule": {"type": "crontab", "value": "0 0 * * *"}, + "failure_issue_threshold": 5, + }, +) +def _with_monitor_config(): + return + + def test_decorator(sentry_init): sentry_init() @@ -45,7 +61,9 @@ def test_decorator(sentry_init): # Check for initial checkin fake_capture_checkin.assert_has_calls( [ - mock.call(monitor_slug="abc123", status="in_progress"), + mock.call( + monitor_slug="abc123", status="in_progress", monitor_config=None + ), ] ) @@ -70,7 +88,9 @@ def test_decorator_error(sentry_init): # Check for initial checkin fake_capture_checkin.assert_has_calls( [ - mock.call(monitor_slug="def456", status="in_progress"), + mock.call( + monitor_slug="def456", status="in_progress", monitor_config=None + ), ] ) @@ -93,7 +113,9 @@ def test_contextmanager(sentry_init): # Check for initial checkin fake_capture_checkin.assert_has_calls( [ - mock.call(monitor_slug="abc123", status="in_progress"), + mock.call( + monitor_slug="abc123", status="in_progress", monitor_config=None + ), ] ) @@ -118,7 +140,9 @@ def test_contextmanager_error(sentry_init): # Check for initial checkin fake_capture_checkin.assert_has_calls( [ - mock.call(monitor_slug="def456", status="in_progress"), + mock.call( + monitor_slug="def456", status="in_progress", monitor_config=None + ), ] ) @@ -194,6 +218,8 @@ def test_monitor_config(sentry_init, capture_envelopes): monitor_config = { "schedule": {"type": "crontab", "value": "0 0 * * *"}, + "failure_issue_threshold": 5, + "recovery_threshold": 5, } capture_checkin(monitor_slug="abc123", monitor_config=monitor_config) @@ -211,6 +237,41 @@ def test_monitor_config(sentry_init, capture_envelopes): assert "monitor_config" not in check_in 
+def test_decorator_monitor_config(sentry_init, capture_envelopes): + sentry_init() + envelopes = capture_envelopes() + + _with_monitor_config() + + assert len(envelopes) == 2 + + for check_in_envelope in envelopes: + assert len(check_in_envelope.items) == 1 + check_in = check_in_envelope.items[0].payload.json + + assert check_in["monitor_slug"] == "ghi789" + assert check_in["monitor_config"] == { + "schedule": {"type": "crontab", "value": "0 0 * * *"}, + "failure_issue_threshold": 5, + } + + +def test_decorator_no_monitor_config(sentry_init, capture_envelopes): + sentry_init() + envelopes = capture_envelopes() + + _no_monitor_config() + + assert len(envelopes) == 2 + + for check_in_envelope in envelopes: + assert len(check_in_envelope.items) == 1 + check_in = check_in_envelope.items[0].payload.json + + assert check_in["monitor_slug"] == "ghi789" + assert "monitor_config" not in check_in + + def test_capture_checkin_sdk_not_initialized(): # Tests that the capture_checkin does not raise an error when Sentry SDK is not initialized. # sentry_init() is intentionally omitted. 
diff --git a/tests/crons/test_crons_async_py3.py b/tests/crons/test_crons_async_py3.py index 6e00b594bd..53ec96d713 100644 --- a/tests/crons/test_crons_async_py3.py +++ b/tests/crons/test_crons_async_py3.py @@ -49,7 +49,9 @@ async def test_decorator(sentry_init): # Check for initial checkin fake_capture_checkin.assert_has_calls( [ - mock.call(monitor_slug="abc123", status="in_progress"), + mock.call( + monitor_slug="abc123", status="in_progress", monitor_config=None + ), ] ) @@ -75,7 +77,9 @@ async def test_decorator_error(sentry_init): # Check for initial checkin fake_capture_checkin.assert_has_calls( [ - mock.call(monitor_slug="def456", status="in_progress"), + mock.call( + monitor_slug="def456", status="in_progress", monitor_config=None + ), ] ) @@ -99,7 +103,9 @@ async def test_contextmanager(sentry_init): # Check for initial checkin fake_capture_checkin.assert_has_calls( [ - mock.call(monitor_slug="abc123", status="in_progress"), + mock.call( + monitor_slug="abc123", status="in_progress", monitor_config=None + ), ] ) @@ -125,7 +131,9 @@ async def test_contextmanager_error(sentry_init): # Check for initial checkin fake_capture_checkin.assert_has_calls( [ - mock.call(monitor_slug="def456", status="in_progress"), + mock.call( + monitor_slug="def456", status="in_progress", monitor_config=None + ), ] ) diff --git a/tests/integrations/django/myapp/signals.py b/tests/integrations/django/myapp/signals.py new file mode 100644 index 0000000000..3dab92b8d9 --- /dev/null +++ b/tests/integrations/django/myapp/signals.py @@ -0,0 +1,15 @@ +from django.core import signals +from django.dispatch import receiver + +myapp_custom_signal = signals.Signal() +myapp_custom_signal_silenced = signals.Signal() + + +@receiver(myapp_custom_signal) +def signal_handler(sender, **kwargs): + assert sender == "hello" + + +@receiver(myapp_custom_signal_silenced) +def signal_handler_silenced(sender, **kwargs): + assert sender == "hello" diff --git a/tests/integrations/django/myapp/urls.py 
b/tests/integrations/django/myapp/urls.py index 92621b07a2..672a9b15ae 100644 --- a/tests/integrations/django/myapp/urls.py +++ b/tests/integrations/django/myapp/urls.py @@ -76,6 +76,11 @@ def path(path, *args, **kwargs): name="csrf_hello_not_exempt", ), path("sync/thread_ids", views.thread_ids_sync, name="thread_ids_sync"), + path( + "send-myapp-custom-signal", + views.send_myapp_custom_signal, + name="send_myapp_custom_signal", + ), ] # async views diff --git a/tests/integrations/django/myapp/views.py b/tests/integrations/django/myapp/views.py index 193147003b..294895430b 100644 --- a/tests/integrations/django/myapp/views.py +++ b/tests/integrations/django/myapp/views.py @@ -14,6 +14,11 @@ from django.views.decorators.csrf import csrf_exempt from django.views.generic import ListView +from tests.integrations.django.myapp.signals import ( + myapp_custom_signal, + myapp_custom_signal_silenced, +) + try: from rest_framework.decorators import api_view from rest_framework.response import Response @@ -253,3 +258,10 @@ def thread_ids_sync(*args, **kwargs): my_async_view = None thread_ids_async = None post_echo_async = None + + +@csrf_exempt +def send_myapp_custom_signal(request): + myapp_custom_signal.send(sender="hello") + myapp_custom_signal_silenced.send(sender="hello") + return HttpResponse("ok") diff --git a/tests/integrations/django/test_basic.py b/tests/integrations/django/test_basic.py index 8c01c71830..1efe4be278 100644 --- a/tests/integrations/django/test_basic.py +++ b/tests/integrations/django/test_basic.py @@ -29,6 +29,7 @@ from sentry_sdk.tracing import Span from tests.conftest import ApproxDict, unpack_werkzeug_response from tests.integrations.django.myapp.wsgi import application +from tests.integrations.django.myapp.signals import myapp_custom_signal_silenced from tests.integrations.django.utils import pytest_mark_django_db_decorator DJANGO_VERSION = DJANGO_VERSION[:2] @@ -1035,6 +1036,47 @@ def test_signals_spans_disabled(sentry_init, client, 
capture_events): assert not transaction["spans"] +EXPECTED_SIGNALS_SPANS_FILTERED = """\ +- op="http.server": description=null + - op="event.django": description="django.db.reset_queries" + - op="event.django": description="django.db.close_old_connections" + - op="event.django": description="tests.integrations.django.myapp.signals.signal_handler"\ +""" + + +def test_signals_spans_filtering(sentry_init, client, capture_events, render_span_tree): + sentry_init( + integrations=[ + DjangoIntegration( + middleware_spans=False, + signals_denylist=[ + myapp_custom_signal_silenced, + ], + ), + ], + traces_sample_rate=1.0, + ) + events = capture_events() + + client.get(reverse("send_myapp_custom_signal")) + + (transaction,) = events + + assert render_span_tree(transaction) == EXPECTED_SIGNALS_SPANS_FILTERED + + assert transaction["spans"][0]["op"] == "event.django" + assert transaction["spans"][0]["description"] == "django.db.reset_queries" + + assert transaction["spans"][1]["op"] == "event.django" + assert transaction["spans"][1]["description"] == "django.db.close_old_connections" + + assert transaction["spans"][2]["op"] == "event.django" + assert ( + transaction["spans"][2]["description"] + == "tests.integrations.django.myapp.signals.signal_handler" + ) + + def test_csrf(sentry_init, client): """ Assert that CSRF view decorator works even with the view wrapped in our own diff --git a/tests/integrations/strawberry/test_strawberry_py3.py b/tests/integrations/strawberry/test_strawberry_py3.py index 4911a1b5c3..e84c5f6fa5 100644 --- a/tests/integrations/strawberry/test_strawberry_py3.py +++ b/tests/integrations/strawberry/test_strawberry_py3.py @@ -600,3 +600,30 @@ def test_transaction_mutation( "graphql.path": "change", } ) + + +@parameterize_strawberry_test +def test_handle_none_query_gracefully( + request, + sentry_init, + capture_events, + client_factory, + async_execution, + framework_integrations, +): + sentry_init( + integrations=[ + 
StrawberryIntegration(async_execution=async_execution), + ] + + framework_integrations, + ) + events = capture_events() + + schema = strawberry.Schema(Query) + + client_factory = request.getfixturevalue(client_factory) + client = client_factory(schema) + + client.post("/graphql", json={}) + + assert len(events) == 0, "expected no events to be sent to Sentry" diff --git a/tests/test_metrics.py b/tests/test_metrics.py index 1d4a49fcb2..741935615d 100644 --- a/tests/test_metrics.py +++ b/tests/test_metrics.py @@ -58,7 +58,7 @@ def parse_metrics(bytes): @minimum_python_37_with_gevent @pytest.mark.forked -def test_incr(sentry_init, capture_envelopes, maybe_monkeypatched_threading): +def test_increment(sentry_init, capture_envelopes, maybe_monkeypatched_threading): sentry_init( release="fun-release", environment="not-fun-env", @@ -67,7 +67,8 @@ def test_incr(sentry_init, capture_envelopes, maybe_monkeypatched_threading): ts = time.time() envelopes = capture_envelopes() - metrics.incr("foobar", 1.0, tags={"foo": "bar", "blub": "blah"}, timestamp=ts) + metrics.increment("foobar", 1.0, tags={"foo": "bar", "blub": "blah"}, timestamp=ts) + # python specific alias metrics.incr("foobar", 2.0, tags={"foo": "bar", "blub": "blah"}, timestamp=ts) Hub.current.flush() @@ -487,8 +488,8 @@ def test_multiple(sentry_init, capture_envelopes): metrics.gauge("my-gauge", 20.0, tags={"x": "y"}, timestamp=ts) metrics.gauge("my-gauge", 30.0, tags={"x": "y"}, timestamp=ts) for _ in range(10): - metrics.incr("counter-1", 1.0, timestamp=ts) - metrics.incr("counter-2", 1.0, timestamp=ts) + metrics.increment("counter-1", 1.0, timestamp=ts) + metrics.increment("counter-2", 1.0, timestamp=ts) Hub.current.flush() @@ -570,18 +571,13 @@ def test_transaction_name( @minimum_python_37_with_gevent @pytest.mark.forked -@pytest.mark.parametrize("sample_rate", [1.0, None]) def test_metric_summaries( - sentry_init, capture_envelopes, sample_rate, maybe_monkeypatched_threading + sentry_init, capture_envelopes, 
maybe_monkeypatched_threading ): sentry_init( release="fun-release@1.0.0", environment="not-fun-env", enable_tracing=True, - _experiments={ - "enable_metrics": True, - "metrics_summary_sample_rate": sample_rate, - }, ) ts = time.time() envelopes = capture_envelopes() @@ -589,7 +585,7 @@ def test_metric_summaries( with start_transaction( op="stuff", name="/foo", source=TRANSACTION_SOURCE_ROUTE ) as transaction: - metrics.incr("root-counter", timestamp=ts) + metrics.increment("root-counter", timestamp=ts) with metrics.timing("my-timer-metric", tags={"a": "b"}, timestamp=ts): for x in range(10): metrics.distribution("my-dist", float(x), timestamp=ts) @@ -681,171 +677,99 @@ def test_metric_summaries( @minimum_python_37_with_gevent @pytest.mark.forked -def test_metrics_summary_disabled( - sentry_init, capture_envelopes, maybe_monkeypatched_threading -): - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - enable_tracing=True, - _experiments={"enable_metrics": True, "metrics_summary_sample_rate": 0.0}, - ) - ts = time.time() - envelopes = capture_envelopes() - - with start_transaction( - op="stuff", name="/foo", source=TRANSACTION_SOURCE_ROUTE - ) as transaction: - with metrics.timing("my-timer-metric", tags={"a": "b"}, timestamp=ts): - pass - - Hub.current.flush() - - (transaction, envelope) = envelopes - - # Metrics Emission - assert envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) - - assert len(m) == 1 - assert m[0][1] == "my-timer-metric@second" - assert m[0][2] == "d" - assert len(m[0][3]) == 1 - assert m[0][4] == { - "a": "b", - "transaction": "/foo", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - - # Measurement Attachment - t = transaction.items[0].get_transaction_event() - assert "_metrics_summary" not in t - assert "_metrics_summary" not in t["spans"][0] - - -@minimum_python_37_with_gevent -@pytest.mark.forked -def test_metrics_summary_filtered( - 
sentry_init, capture_envelopes, maybe_monkeypatched_threading +@pytest.mark.parametrize( + "metric_name,metric_unit,expected_name", + [ + ("first-metric", "nano-second", "first-metric@nanosecond"), + ("another_metric?", "nano second", "another_metric_@nanosecond"), + ( + "metric", + "nanosecond", + "metric@nanosecond", + ), + ( + "my.amaze.metric I guess", + "nano|\nsecond", + "my.amaze.metric_I_guess@nanosecond", + ), + # fmt: off + (u"métríc", u"nanöseconď", u"m_tr_c@nansecon"), + # fmt: on + ], +) +def test_metric_name_normalization( + sentry_init, + capture_envelopes, + metric_name, + metric_unit, + expected_name, + maybe_monkeypatched_threading, ): - def should_summarize_metric(key, tags): - return key == "foo" - sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", - enable_tracing=True, - _experiments={ - "enable_metrics": True, - "metrics_summary_sample_rate": 1.0, - "should_summarize_metric": should_summarize_metric, - }, + _experiments={"enable_metrics": True, "metric_code_locations": False}, ) - ts = time.time() envelopes = capture_envelopes() - with start_transaction( - op="stuff", name="/foo", source=TRANSACTION_SOURCE_ROUTE - ) as transaction: - metrics.timing("foo", value=3.0, tags={"a": "b"}, timestamp=ts) - metrics.timing("foo", value=2.0, tags={"b": "c"}, timestamp=ts) - metrics.timing("bar", value=1.0, tags={"a": "b"}, timestamp=ts) + metrics.distribution(metric_name, 1.0, unit=metric_unit) Hub.current.flush() - (transaction, envelope) = envelopes + (envelope,) = envelopes - # Metrics Emission + assert len(envelope.items) == 1 assert envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) - assert len(m) == 3 - assert m[0][1] == "bar@second" - assert m[1][1] == "foo@second" - assert m[2][1] == "foo@second" + parsed_metrics = parse_metrics(envelope.items[0].payload.get_bytes()) + assert len(parsed_metrics) == 1 - # Measurement Attachment - t = 
transaction.items[0].get_transaction_event()["_metrics_summary"] - assert len(t["d:foo@second"]) == 2 - assert { - "tags": { - "a": "b", - "environment": "not-fun-env", - "release": "fun-release@1.0.0", - "transaction": "/foo", - }, - "min": 3.0, - "max": 3.0, - "count": 1, - "sum": 3.0, - } in t["d:foo@second"] - assert { - "tags": { - "b": "c", - "environment": "not-fun-env", - "release": "fun-release@1.0.0", - "transaction": "/foo", - }, - "min": 2.0, - "max": 2.0, - "count": 1, - "sum": 2.0, - } in t["d:foo@second"] + name = parsed_metrics[0][1] + assert name == expected_name @minimum_python_37_with_gevent @pytest.mark.forked -def test_tag_normalization( - sentry_init, capture_envelopes, maybe_monkeypatched_threading +@pytest.mark.parametrize( + "metric_tag,expected_tag", + [ + ({"f-oo|bar": "%$foo/"}, {"f-oobar": "%$foo/"}), + ({"foo$.$.$bar": "blah{}"}, {"foo..bar": "blah{}"}), + # fmt: off + ({u"foö-bar": u"snöwmän"}, {u"fo-bar": u"snöwmän"},), + # fmt: on + ({"route": "GET /foo"}, {"route": "GET /foo"}), + ({"__bar__": "this | or , that"}, {"__bar__": "this \\u{7c} or \\u{2c} that"}), + ({"foo/": "hello!\n\r\t\\"}, {"foo/": "hello!\\n\\r\\t\\\\"}), + ], +) +def test_metric_tag_normalization( + sentry_init, + capture_envelopes, + metric_tag, + expected_tag, + maybe_monkeypatched_threading, ): sentry_init( - release="fun-release@1.0.0", - environment="not-fun-env", _experiments={"enable_metrics": True, "metric_code_locations": False}, ) - ts = time.time() envelopes = capture_envelopes() - # fmt: off - metrics.distribution("a", 1.0, tags={"foo-bar": "%$foo"}, timestamp=ts) - metrics.distribution("b", 1.0, tags={"foo$$$bar": "blah{}"}, timestamp=ts) - metrics.distribution("c", 1.0, tags={u"foö-bar": u"snöwmän"}, timestamp=ts) - metrics.distribution("d", 1.0, tags={"route": "GET /foo"}, timestamp=ts) - # fmt: on + metrics.distribution("a", 1.0, tags=metric_tag) + Hub.current.flush() (envelope,) = envelopes assert len(envelope.items) == 1 assert 
envelope.items[0].headers["type"] == "statsd" - m = parse_metrics(envelope.items[0].payload.get_bytes()) - assert len(m) == 4 - assert m[0][4] == { - "foo-bar": "$foo", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } + parsed_metrics = parse_metrics(envelope.items[0].payload.get_bytes()) + assert len(parsed_metrics) == 1 - assert m[1][4] == { - "foo_bar": "blah{}", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } + tags = parsed_metrics[0][4] - # fmt: off - assert m[2][4] == { - "fo_-bar": u"snöwmän", - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - } - assert m[3][4] == { - "release": "fun-release@1.0.0", - "environment": "not-fun-env", - "route": "GET /foo", - } - # fmt: on + expected_tag_key, expected_tag_value = expected_tag.popitem() + assert expected_tag_key in tags + assert tags[expected_tag_key] == expected_tag_value @minimum_python_37_with_gevent @@ -853,13 +777,14 @@ def test_tag_normalization( def test_before_emit_metric( sentry_init, capture_envelopes, maybe_monkeypatched_threading ): - def before_emit(key, tags): - if key == "removed-metric": + def before_emit(key, value, unit, tags): + if key == "removed-metric" or value == 47 or unit == "unsupported": return False + tags["extra"] = "foo" del tags["release"] # this better be a noop! 
- metrics.incr("shitty-recursion") + metrics.increment("shitty-recursion") return True sentry_init( @@ -873,8 +798,10 @@ def before_emit(key, tags): ) envelopes = capture_envelopes() - metrics.incr("removed-metric", 1.0) - metrics.incr("actual-metric", 1.0) + metrics.increment("removed-metric", 1.0) + metrics.increment("another-removed-metric", 47) + metrics.increment("yet-another-removed-metric", 1.0, unit="unsupported") + metrics.increment("actual-metric", 1.0) Hub.current.flush() (envelope,) = envelopes @@ -906,7 +833,7 @@ def test_aggregator_flush( ) envelopes = capture_envelopes() - metrics.incr("a-metric", 1.0) + metrics.increment("a-metric", 1.0) Hub.current.flush() assert len(envelopes) == 1 @@ -925,7 +852,7 @@ def test_tag_serialization( ) envelopes = capture_envelopes() - metrics.incr( + metrics.increment( "counter", tags={ "no-value": None, @@ -970,12 +897,12 @@ def test_flush_recursion_protection( real_capture_envelope = test_client.transport.capture_envelope def bad_capture_envelope(*args, **kwargs): - metrics.incr("bad-metric") + metrics.increment("bad-metric") return real_capture_envelope(*args, **kwargs) monkeypatch.setattr(test_client.transport, "capture_envelope", bad_capture_envelope) - metrics.incr("counter") + metrics.increment("counter") # flush twice to see the inner metric Hub.current.flush() @@ -1004,12 +931,12 @@ def test_flush_recursion_protection_background_flush( real_capture_envelope = test_client.transport.capture_envelope def bad_capture_envelope(*args, **kwargs): - metrics.incr("bad-metric") + metrics.increment("bad-metric") return real_capture_envelope(*args, **kwargs) monkeypatch.setattr(test_client.transport, "capture_envelope", bad_capture_envelope) - metrics.incr("counter") + metrics.increment("counter") # flush via sleep and flag Hub.current.client.metrics_aggregator._force_flush = True diff --git a/tests/test_transport.py b/tests/test_transport.py index c1f70b0108..8848ad471e 100644 --- a/tests/test_transport.py +++ 
b/tests/test_transport.py @@ -14,7 +14,7 @@ from sentry_sdk import Hub, Client, add_breadcrumb, capture_message, Scope from sentry_sdk._compat import datetime_utcnow from sentry_sdk.transport import KEEP_ALIVE_SOCKET_OPTIONS, _parse_rate_limits -from sentry_sdk.envelope import Envelope, parse_json +from sentry_sdk.envelope import Envelope, Item, parse_json from sentry_sdk.integrations.logging import LoggingIntegration try: @@ -466,3 +466,114 @@ def test_complex_limits_without_data_category( client.flush() assert len(capturing_server.captured) == 0 + + +@pytest.mark.parametrize("response_code", [200, 429]) +def test_metric_bucket_limits(capturing_server, response_code, make_client): + client = make_client() + capturing_server.respond_with( + code=response_code, + headers={ + "X-Sentry-Rate-Limits": "4711:metric_bucket:organization:quota_exceeded:custom" + }, + ) + + envelope = Envelope() + envelope.add_item(Item(payload=b"{}", type="statsd")) + client.transport.capture_envelope(envelope) + client.flush() + + assert len(capturing_server.captured) == 1 + assert capturing_server.captured[0].path == "/api/132/envelope/" + capturing_server.clear_captured() + + assert set(client.transport._disabled_until) == set(["metric_bucket"]) + + client.transport.capture_envelope(envelope) + client.capture_event({"type": "transaction"}) + client.flush() + + assert len(capturing_server.captured) == 2 + + envelope = capturing_server.captured[0].envelope + assert envelope.items[0].type == "transaction" + envelope = capturing_server.captured[1].envelope + assert envelope.items[0].type == "client_report" + report = parse_json(envelope.items[0].get_bytes()) + assert report["discarded_events"] == [ + {"category": "metric_bucket", "reason": "ratelimit_backoff", "quantity": 1}, + ] + + +@pytest.mark.parametrize("response_code", [200, 429]) +def test_metric_bucket_limits_with_namespace( + capturing_server, response_code, make_client +): + client = make_client() + capturing_server.respond_with( 
+ code=response_code, + headers={ + "X-Sentry-Rate-Limits": "4711:metric_bucket:organization:quota_exceeded:foo" + }, + ) + + envelope = Envelope() + envelope.add_item(Item(payload=b"{}", type="statsd")) + client.transport.capture_envelope(envelope) + client.flush() + + assert len(capturing_server.captured) == 1 + assert capturing_server.captured[0].path == "/api/132/envelope/" + capturing_server.clear_captured() + + assert set(client.transport._disabled_until) == set([]) + + client.transport.capture_envelope(envelope) + client.capture_event({"type": "transaction"}) + client.flush() + + assert len(capturing_server.captured) == 2 + + envelope = capturing_server.captured[0].envelope + assert envelope.items[0].type == "statsd" + envelope = capturing_server.captured[1].envelope + assert envelope.items[0].type == "transaction" + + +@pytest.mark.parametrize("response_code", [200, 429]) +def test_metric_bucket_limits_with_all_namespaces( + capturing_server, response_code, make_client +): + client = make_client() + capturing_server.respond_with( + code=response_code, + headers={ + "X-Sentry-Rate-Limits": "4711:metric_bucket:organization:quota_exceeded" + }, + ) + + envelope = Envelope() + envelope.add_item(Item(payload=b"{}", type="statsd")) + client.transport.capture_envelope(envelope) + client.flush() + + assert len(capturing_server.captured) == 1 + assert capturing_server.captured[0].path == "/api/132/envelope/" + capturing_server.clear_captured() + + assert set(client.transport._disabled_until) == set(["metric_bucket"]) + + client.transport.capture_envelope(envelope) + client.capture_event({"type": "transaction"}) + client.flush() + + assert len(capturing_server.captured) == 2 + + envelope = capturing_server.captured[0].envelope + assert envelope.items[0].type == "transaction" + envelope = capturing_server.captured[1].envelope + assert envelope.items[0].type == "client_report" + report = parse_json(envelope.items[0].get_bytes()) + assert report["discarded_events"] == [ 
+ {"category": "metric_bucket", "reason": "ratelimit_backoff", "quantity": 1}, + ]