diff --git a/.github/workflows/test-integrations-data-processing.yml b/.github/workflows/test-integrations-data-processing.yml
index ebcd89efea..b9f1b3fdcb 100644
--- a/.github/workflows/test-integrations-data-processing.yml
+++ b/.github/workflows/test-integrations-data-processing.yml
@@ -42,6 +42,10 @@ jobs:
       - name: Erase coverage
         run: |
           coverage erase
+      - name: Test anthropic latest
+        run: |
+          set -x # print commands that are executed
+          ./scripts/runtox.sh "py${{ matrix.python-version }}-anthropic-latest" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
       - name: Test arq latest
         run: |
           set -x # print commands that are executed
@@ -58,10 +62,18 @@ jobs:
         run: |
           set -x # print commands that are executed
           ./scripts/runtox.sh "py${{ matrix.python-version }}-huey-latest" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
+      - name: Test langchain latest
+        run: |
+          set -x # print commands that are executed
+          ./scripts/runtox.sh "py${{ matrix.python-version }}-langchain-latest" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
       - name: Test openai latest
         run: |
           set -x # print commands that are executed
           ./scripts/runtox.sh "py${{ matrix.python-version }}-openai-latest" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
+      - name: Test huggingface_hub latest
+        run: |
+          set -x # print commands that are executed
+          ./scripts/runtox.sh "py${{ matrix.python-version }}-huggingface_hub-latest" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
       - name: Test rq latest
         run: |
           set -x # print commands that are executed
@@ -98,6 +110,10 @@ jobs:
       - name: Erase coverage
         run: |
           coverage erase
+      - name: Test anthropic pinned
+        run: |
+          set -x # print commands that are executed
+          ./scripts/runtox.sh --exclude-latest "py${{ matrix.python-version }}-anthropic" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
       - name: Test arq pinned
         run: |
           set -x # print commands that are executed
@@ -114,10 +130,18 @@ jobs:
         run: |
           set -x # print commands that are executed
           ./scripts/runtox.sh --exclude-latest "py${{ matrix.python-version }}-huey" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
+      - name: Test langchain pinned
+        run: |
+          set -x # print commands that are executed
+          ./scripts/runtox.sh --exclude-latest "py${{ matrix.python-version }}-langchain" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
       - name: Test openai pinned
         run: |
           set -x # print commands that are executed
           ./scripts/runtox.sh --exclude-latest "py${{ matrix.python-version }}-openai" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
+      - name: Test huggingface_hub pinned
+        run: |
+          set -x # print commands that are executed
+          ./scripts/runtox.sh --exclude-latest "py${{ matrix.python-version }}-huggingface_hub" --cov=tests --cov=sentry_sdk --cov-report= --cov-branch
       - name: Test rq pinned
         run: |
           set -x # print commands that are executed
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 16a3072db5..df6c8cfdc1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,86 @@
 # Changelog
 
+## 2.1.1
+
+- Fix trace propagation in Celery tasks started by Celery Beat. (#3047) by @antonpirker
+
+## 2.1.0
+
+- fix(quart): Fix Quart integration (#3043) by @szokeasaurusrex
+
+- **New integration:** [Langchain](https://docs.sentry.io/platforms/python/integrations/langchain/) (#2911) by @colin-sentry
+
+  Usage: (Langchain is auto-enabling, so you do not need to do anything special)
+  ```python
+  from langchain_openai import ChatOpenAI
+  import sentry_sdk
+
+  sentry_sdk.init(
+      dsn="...",
+      enable_tracing=True,
+      traces_sample_rate=1.0,
+  )
+
+  llm = ChatOpenAI(model="gpt-3.5-turbo-0125", temperature=0)
+  ```
+
+  Check out [the LangChain docs](https://docs.sentry.io/platforms/python/integrations/langchain/) for details.
+
+- **New integration:** [Anthropic](https://docs.sentry.io/platforms/python/integrations/anthropic/) (#2831) by @czyber
+
+  Usage: (add the AnthropicIntegration to your `sentry_sdk.init()` call)
+  ```python
+  from anthropic import Anthropic
+
+  import sentry_sdk
+  from sentry_sdk.integrations.anthropic import AnthropicIntegration
+
+  sentry_sdk.init(
+      dsn="...",
+      enable_tracing=True,
+      traces_sample_rate=1.0,
+      integrations=[AnthropicIntegration()],
+  )
+
+  client = Anthropic()
+  ```
+  Check out [the Anthropic docs](https://docs.sentry.io/platforms/python/integrations/anthropic/) for details.
+
+- **New integration:** [Huggingface Hub](https://docs.sentry.io/platforms/python/integrations/huggingface/) (#3033) by @colin-sentry
+
+  Usage: (Huggingface Hub is auto-enabling, so you do not need to do anything special)
+
+  ```python
+  import sentry_sdk
+  from huggingface_hub import InferenceClient
+
+  sentry_sdk.init(
+      dsn="...",
+      enable_tracing=True,
+      traces_sample_rate=1.0,
+  )
+
+  client = InferenceClient("some-model")
+  ```
+
+  Check out [the Huggingface docs](https://docs.sentry.io/platforms/python/integrations/huggingface/) for details. (coming soon!)
+
+- fix(huggingface): Reduce API cross-section for huggingface in test (#3042) by @colin-sentry
+- fix(django): Fix Django ASGI integration on Python 3.12 (#3027) by @bellini666
+- feat(perf): Add ability to put measurements directly on spans. (#2967) by @colin-sentry
+- fix(tests): Fix trytond tests (#3031) by @sentrivana
+- fix(tests): Update `pytest-asyncio` to fix CI (#3030) by @sentrivana
+- fix(docs): Link to respective migration guides directly (#3020) by @sentrivana
+- docs(scope): Add docstring to `Scope.set_tags` (#2978) by @szokeasaurusrex
+- test(scope): Fix typos in assert error message (#2978) by @szokeasaurusrex
+- feat(scope): New `set_tags` function (#2978) by @szokeasaurusrex
+- test(scope): Add unit test for `Scope.set_tags` (#2978) by @szokeasaurusrex
+- feat(scope): Add `set_tags` to top-level API (#2978) by @szokeasaurusrex
+- test(scope): Add unit test for top-level API `set_tags` (#2978) by @szokeasaurusrex
+- feat(tests): Parallelize tox (#3025) by @sentrivana
+- build(deps): Bump checkouts/data-schemas from `4aa14a7` to `4381a97` (#3028) by @dependabot
+- meta(license): Bump copyright year (#3029) by @szokeasaurusrex
+
 ## 2.0.1
 
 ### Various fixes & improvements
diff --git a/LICENSE b/LICENSE
index 016323bd8d..c4c8162f13 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2018 Functional Software, Inc. dba Sentry
+Copyright (c) 2018-2024 Functional Software, Inc. dba Sentry
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 130783c0e9..89edb131b1 100644
--- a/README.md
+++ b/README.md
@@ -78,13 +78,13 @@ See [the documentation](https://docs.sentry.io/platforms/python/integrations/) f
 
 ### Migrating From `1.x` to `2.x`
 
-If you're on SDK version 1.x, we highly recommend updating to the 2.x major. To make the process easier we've prepared a [migration guide](https://docs.sentry.io/platforms/python/migration/) with the most common changes as well as a [detailed changelog](MIGRATION_GUIDE.md).
+If you're on SDK version 1.x, we highly recommend updating to the 2.x major. To make the process easier we've prepared a [migration guide](https://docs.sentry.io/platforms/python/migration/1.x-to-2.x) with the most common changes as well as a [detailed changelog](MIGRATION_GUIDE.md).
 
 ### Migrating From `raven-python`
 
 The old `raven-python` client has entered maintenance mode and was moved [here](https://github.com/getsentry/raven-python).
 
-If you're using `raven-python`, we recommend you to migrate to this new SDK. You can find the benefits of migrating and how to do it in our [migration guide](https://docs.sentry.io/platforms/python/migration/).
+If you're using `raven-python`, we recommend you to migrate to this new SDK. You can find the benefits of migrating and how to do it in our [migration guide](https://docs.sentry.io/platforms/python/migration/raven-to-sentry-sdk/).
 
 ## Contributing to the SDK
 
diff --git a/checkouts/data-schemas b/checkouts/data-schemas
index 4aa14a74b6..4381a979b1 160000
--- a/checkouts/data-schemas
+++ b/checkouts/data-schemas
@@ -1 +1 @@
-Subproject commit 4aa14a74b6a3c8e468af08acbe2cf3a7064151d4
+Subproject commit 4381a979b18786b2cb37e1937bc685fd46a33c5e
diff --git a/docs/conf.py b/docs/conf.py
index ae1ab934b3..0f3c483d0b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -28,7 +28,7 @@
 copyright = "2019-{}, Sentry Team and Contributors".format(datetime.now().year)
 author = "Sentry Team and Contributors"
 
-release = "2.0.1"
+release = "2.1.1"
 version = ".".join(release.split(".")[:2])  # The short X.Y version.
diff --git a/mypy.ini b/mypy.ini
index c1444d61e5..4f143ede97 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -36,6 +36,8 @@ ignore_missing_imports = True
 ignore_missing_imports = True
 [mypy-aiohttp.*]
 ignore_missing_imports = True
+[mypy-anthropic.*]
+ignore_missing_imports = True
 [mypy-sanic.*]
 ignore_missing_imports = True
 [mypy-tornado.*]
@@ -48,6 +50,8 @@ ignore_missing_imports = True
 ignore_missing_imports = True
 [mypy-asgiref.*]
 ignore_missing_imports = True
+[mypy-langchain_core.*]
+ignore_missing_imports = True
 [mypy-executing.*]
 ignore_missing_imports = True
 [mypy-asttokens.*]
@@ -69,6 +73,8 @@ ignore_missing_imports = True
 ignore_missing_imports = True
 [mypy-openai.*]
 ignore_missing_imports = True
+[mypy-huggingface_hub.*]
+ignore_missing_imports = True
 [mypy-arq.*]
 ignore_missing_imports = True
 [mypy-grpc.*]
diff --git a/scripts/aws_lambda_functions/sentryPythonDeleteTestFunctions/lambda_function.py b/scripts/aws_lambda_functions/sentryPythonDeleteTestFunctions/lambda_function.py
index 1fc3994176..ce7afb6aa4 100644
--- a/scripts/aws_lambda_functions/sentryPythonDeleteTestFunctions/lambda_function.py
+++ b/scripts/aws_lambda_functions/sentryPythonDeleteTestFunctions/lambda_function.py
@@ -1,12 +1,12 @@
 import boto3
-import sentry_sdk 
+import sentry_sdk
 
 monitor_slug = "python-sdk-aws-lambda-tests-cleanup"
 monitor_config = {
     "schedule": {
         "type": "crontab",
-        "value": "0 12 * * 0", # 12 o'clock on Sunday
+        "value": "0 12 * * 0",  # 12 o'clock on Sunday
     },
     "timezone": "UTC",
     "checkin_margin": 2,
@@ -24,7 +24,7 @@ def delete_lambda_functions(prefix="test_"):
     """
     client = boto3.client("lambda", region_name="us-east-1")
     functions_deleted = 0
-    
+
     functions_paginator = client.get_paginator("list_functions")
     for functions_page in functions_paginator.paginate():
         for func in functions_page["Functions"]:
@@ -39,17 +39,17 @@ def delete_lambda_functions(prefix="test_"):
             print(f"Got exception: {ex}")
 
     return functions_deleted
-    
+
 
 def lambda_handler(event, context):
     functions_deleted = delete_lambda_functions()
-    
+
     sentry_sdk.metrics.gauge(
-        key="num_aws_functions_deleted", 
+        key="num_aws_functions_deleted",
         value=functions_deleted,
     )
-    
+
     return {
-        'statusCode': 200,
-        'body': f"{functions_deleted} AWS Lambda functions deleted successfully."
+        "statusCode": 200,
+        "body": f"{functions_deleted} AWS Lambda functions deleted successfully.",
     }
+ "statusCode": 200, + "body": f"{functions_deleted} AWS Lambda functions deleted successfully.", } diff --git a/scripts/runtox.sh b/scripts/runtox.sh index 50da44dd53..146af7c665 100755 --- a/scripts/runtox.sh +++ b/scripts/runtox.sh @@ -40,4 +40,4 @@ if [ -z "${ENV}" ]; then exit 0 fi -exec $TOXPATH -e "$ENV" -- "${@:2}" +exec $TOXPATH -p auto -o -e "$ENV" -- "${@:2}" diff --git a/scripts/split-tox-gh-actions/split-tox-gh-actions.py b/scripts/split-tox-gh-actions/split-tox-gh-actions.py index 6b456c5544..5d5f423857 100755 --- a/scripts/split-tox-gh-actions/split-tox-gh-actions.py +++ b/scripts/split-tox-gh-actions/split-tox-gh-actions.py @@ -66,11 +66,14 @@ "gcp", ], "Data Processing": [ + "anthropic", "arq", "beam", "celery", "huey", + "langchain", "openai", + "huggingface_hub", "rq", ], "Databases": [ diff --git a/sentry_sdk/__init__.py b/sentry_sdk/__init__.py index 6c44867476..1b646992ff 100644 --- a/sentry_sdk/__init__.py +++ b/sentry_sdk/__init__.py @@ -40,6 +40,7 @@ "set_level", "set_measurement", "set_tag", + "set_tags", "set_user", "start_span", "start_transaction", diff --git a/sentry_sdk/_types.py b/sentry_sdk/_types.py index 1577dbde4f..fd0747eef3 100644 --- a/sentry_sdk/_types.py +++ b/sentry_sdk/_types.py @@ -28,6 +28,45 @@ # "critical" is an alias of "fatal" recognized by Relay LogLevelStr = Literal["fatal", "critical", "error", "warning", "info", "debug"] + DurationUnit = Literal[ + "nanosecond", + "microsecond", + "millisecond", + "second", + "minute", + "hour", + "day", + "week", + ] + + InformationUnit = Literal[ + "bit", + "byte", + "kilobyte", + "kibibyte", + "megabyte", + "mebibyte", + "gigabyte", + "gibibyte", + "terabyte", + "tebibyte", + "petabyte", + "pebibyte", + "exabyte", + "exbibyte", + ] + + FractionUnit = Literal["ratio", "percent"] + MeasurementUnit = Union[DurationUnit, InformationUnit, FractionUnit, str] + + MeasurementValue = TypedDict( + "MeasurementValue", + { + "value": float, + "unit": Optional[MeasurementUnit], + }, + ) + Event = TypedDict( "Event", { @@ -49,7 +88,7 @@ "level": LogLevelStr, "logentry": Mapping[str, object], "logger": str, - "measurements": dict[str, object], + "measurements": dict[str, MeasurementValue], "message": str, "modules": dict[str, str], "monitor_config": Mapping[str, object], @@ -118,37 +157,6 @@ ] SessionStatus = Literal["ok", "exited", "crashed", "abnormal"] - DurationUnit = Literal[ - "nanosecond", - "microsecond", - "millisecond", - "second", - "minute", - "hour", - "day", - "week", - ] - - InformationUnit = Literal[ - "bit", - "byte", - "kilobyte", - "kibibyte", - "megabyte", - "mebibyte", - "gigabyte", - "gibibyte", - "terabyte", - "tebibyte", - "petabyte", - "pebibyte", - "exabyte", - "exbibyte", - ] - - FractionUnit = Literal["ratio", "percent"] - MeasurementUnit = Union[DurationUnit, InformationUnit, FractionUnit, str] - ProfilerMode = Literal["sleep", "thread", "gevent", "unknown"] # Type of the metric. 
diff --git a/sentry_sdk/ai/__init__.py b/sentry_sdk/ai/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/sentry_sdk/ai/monitoring.py b/sentry_sdk/ai/monitoring.py new file mode 100644 index 0000000000..f5f9cd7aad --- /dev/null +++ b/sentry_sdk/ai/monitoring.py @@ -0,0 +1,77 @@ +from functools import wraps + +import sentry_sdk.utils +from sentry_sdk import start_span +from sentry_sdk.tracing import Span +from sentry_sdk.utils import ContextVar +from sentry_sdk._types import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Optional, Callable, Any + +_ai_pipeline_name = ContextVar("ai_pipeline_name", default=None) + + +def set_ai_pipeline_name(name): + # type: (Optional[str]) -> None + _ai_pipeline_name.set(name) + + +def get_ai_pipeline_name(): + # type: () -> Optional[str] + return _ai_pipeline_name.get() + + +def ai_track(description, **span_kwargs): + # type: (str, Any) -> Callable[..., Any] + def decorator(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + @wraps(f) + def wrapped(*args, **kwargs): + # type: (Any, Any) -> Any + curr_pipeline = _ai_pipeline_name.get() + op = span_kwargs.get("op", "ai.run" if curr_pipeline else "ai.pipeline") + with start_span(description=description, op=op, **span_kwargs) as span: + if curr_pipeline: + span.set_data("ai.pipeline.name", curr_pipeline) + return f(*args, **kwargs) + else: + _ai_pipeline_name.set(description) + try: + res = f(*args, **kwargs) + except Exception as e: + event, hint = sentry_sdk.utils.event_from_exception( + e, + client_options=sentry_sdk.get_client().options, + mechanism={"type": "ai_monitoring", "handled": False}, + ) + sentry_sdk.capture_event(event, hint=hint) + raise e from None + finally: + _ai_pipeline_name.set(None) + return res + + return wrapped + + return decorator + + +def record_token_usage( + span, prompt_tokens=None, completion_tokens=None, total_tokens=None +): + # type: (Span, Optional[int], Optional[int], Optional[int]) -> None + ai_pipeline_name = get_ai_pipeline_name() + if ai_pipeline_name: + span.set_data("ai.pipeline.name", ai_pipeline_name) + if prompt_tokens is not None: + span.set_measurement("ai_prompt_tokens_used", value=prompt_tokens) + if completion_tokens is not None: + span.set_measurement("ai_completion_tokens_used", value=completion_tokens) + if ( + total_tokens is None + and prompt_tokens is not None + and completion_tokens is not None + ): + total_tokens = prompt_tokens + completion_tokens + if total_tokens is not None: + span.set_measurement("ai_total_tokens_used", total_tokens) diff --git a/sentry_sdk/ai/utils.py b/sentry_sdk/ai/utils.py new file mode 100644 index 0000000000..42d46304e4 --- /dev/null +++ b/sentry_sdk/ai/utils.py @@ -0,0 +1,32 @@ +from sentry_sdk._types import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Any + +from sentry_sdk.tracing import Span +from sentry_sdk.utils import logger + + +def _normalize_data(data): + # type: (Any) -> Any + + # convert pydantic data (e.g. 
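Reviewer note: a short sketch of how the new `ai_track` decorator nests, following the `op` logic above (the pipeline and step names are made up). The first decorated call in a trace opens an `ai.pipeline` span; decorated calls nested under it become `ai.run` spans annotated with the pipeline name:

```python
import sentry_sdk
from sentry_sdk.ai.monitoring import ai_track

sentry_sdk.init(dsn="...", enable_tracing=True, traces_sample_rate=1.0)


@ai_track("Summarize step")
def summarize(text):
    # A pipeline is already active here, so this becomes an "ai.run"
    # span carrying ai.pipeline.name = "My AI pipeline".
    return text[:10]


@ai_track("My AI pipeline")
def my_pipeline(question):
    # No pipeline is active yet, so this becomes the "ai.pipeline" span.
    return summarize(question)


with sentry_sdk.start_transaction(name="ai-demo"):
    my_pipeline("What is Sentry?")
```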
diff --git a/sentry_sdk/ai/utils.py b/sentry_sdk/ai/utils.py
new file mode 100644
index 0000000000..42d46304e4
--- /dev/null
+++ b/sentry_sdk/ai/utils.py
@@ -0,0 +1,32 @@
+from sentry_sdk._types import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any
+
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import logger
+
+
+def _normalize_data(data):
+    # type: (Any) -> Any
+
+    # convert pydantic data (e.g. OpenAI v1+) to json compatible format
+    if hasattr(data, "model_dump"):
+        try:
+            return data.model_dump()
+        except Exception as e:
+            logger.warning("Could not convert pydantic data to JSON: %s", e)
+            return data
+    if isinstance(data, list):
+        if len(data) == 1:
+            return _normalize_data(data[0])  # remove empty dimensions
+        return list(_normalize_data(x) for x in data)
+    if isinstance(data, dict):
+        return {k: _normalize_data(v) for (k, v) in data.items()}
+    return data
+
+
+def set_data_normalized(span, key, value):
+    # type: (Span, str, Any) -> None
+    normalized = _normalize_data(value)
+    span.set_data(key, normalized)
diff --git a/sentry_sdk/api.py b/sentry_sdk/api.py
index f00ed9f96a..37c81afcc5 100644
--- a/sentry_sdk/api.py
+++ b/sentry_sdk/api.py
@@ -8,6 +8,8 @@
 from sentry_sdk.tracing import NoOpSpan, Transaction
 
 if TYPE_CHECKING:
+    from collections.abc import Mapping
+
     from typing import Any
     from typing import Dict
     from typing import Generator
@@ -64,6 +66,7 @@ def overload(x):
     "set_level",
     "set_measurement",
     "set_tag",
+    "set_tags",
     "set_user",
     "start_span",
     "start_transaction",
@@ -239,6 +242,12 @@ def set_tag(key, value):
     return Scope.get_isolation_scope().set_tag(key, value)
 
 
+@scopemethod
+def set_tags(tags):
+    # type: (Mapping[str, object]) -> None
+    Scope.get_isolation_scope().set_tags(tags)
+
+
 @scopemethod
 def set_context(key, value):
     # type: (str, Dict[str, Any]) -> None
+ Example: ["hello", "world"] + """ + DB_NAME = "db.name" """ The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails). @@ -217,6 +296,7 @@ class SPANDATA: class OP: + ANTHROPIC_MESSAGES_CREATE = "ai.messages.create.anthropic" CACHE_GET_ITEM = "cache.get_item" DB = "db" DB_REDIS = "db.redis" @@ -245,6 +325,14 @@ class OP: MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send" OPENAI_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.openai" OPENAI_EMBEDDINGS_CREATE = "ai.embeddings.create.openai" + HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = ( + "ai.chat_completions.create.huggingface_hub" + ) + LANGCHAIN_PIPELINE = "ai.pipeline.langchain" + LANGCHAIN_RUN = "ai.run.langchain" + LANGCHAIN_TOOL = "ai.tool.langchain" + LANGCHAIN_AGENT = "ai.agent.langchain" + LANGCHAIN_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.langchain" QUEUE_SUBMIT_ARQ = "queue.submit.arq" QUEUE_TASK_ARQ = "queue.task.arq" QUEUE_SUBMIT_CELERY = "queue.submit.celery" @@ -345,4 +433,4 @@ def _get_default_options(): del _get_default_options -VERSION = "2.0.1" +VERSION = "2.1.1" diff --git a/sentry_sdk/integrations/__init__.py b/sentry_sdk/integrations/__init__.py index b0ec5e2d3e..fffd573491 100644 --- a/sentry_sdk/integrations/__init__.py +++ b/sentry_sdk/integrations/__init__.py @@ -85,6 +85,8 @@ def iter_default_integrations(with_auto_enabling_integrations): "sentry_sdk.integrations.graphene.GrapheneIntegration", "sentry_sdk.integrations.httpx.HttpxIntegration", "sentry_sdk.integrations.huey.HueyIntegration", + "sentry_sdk.integrations.huggingface_hub.HuggingfaceHubIntegration", + "sentry_sdk.integrations.langchain.LangchainIntegration", "sentry_sdk.integrations.loguru.LoguruIntegration", "sentry_sdk.integrations.openai.OpenAIIntegration", "sentry_sdk.integrations.pymongo.PyMongoIntegration", diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py new file mode 100644 index 0000000000..9d43093ac4 --- /dev/null +++ b/sentry_sdk/integrations/anthropic.py @@ -0,0 +1,170 @@ +from functools import wraps + +import sentry_sdk +from sentry_sdk.ai.monitoring import record_token_usage +from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.integrations import DidNotEnable, Integration +from sentry_sdk.scope import should_send_default_pii +from sentry_sdk.utils import ( + capture_internal_exceptions, + ensure_integration_enabled, + event_from_exception, + package_version, +) + +from anthropic.resources import Messages + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from typing import Any, Iterator + from anthropic.types import MessageStreamEvent + from sentry_sdk.tracing import Span + + +class AnthropicIntegration(Integration): + identifier = "anthropic" + + def __init__(self, include_prompts=True): + # type: (AnthropicIntegration, bool) -> None + self.include_prompts = include_prompts + + @staticmethod + def setup_once(): + # type: () -> None + version = package_version("anthropic") + + if version is None: + raise DidNotEnable("Unparsable anthropic version.") + + if version < (0, 16): + raise DidNotEnable("anthropic 0.16 or newer required.") + + Messages.create = _wrap_message_create(Messages.create) + + +def _capture_exception(exc): + # type: (Any) -> None + event, hint = event_from_exception( + exc, + client_options=sentry_sdk.get_client().options, + mechanism={"type": "anthropic", "handled": False}, + ) + sentry_sdk.capture_event(event, hint=hint) + + +def 
diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py
new file mode 100644
index 0000000000..9d43093ac4
--- /dev/null
+++ b/sentry_sdk/integrations/anthropic.py
@@ -0,0 +1,170 @@
+from functools import wraps
+
+import sentry_sdk
+from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    ensure_integration_enabled,
+    event_from_exception,
+    package_version,
+)
+
+from anthropic.resources import Messages
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing import Any, Iterator
+    from anthropic.types import MessageStreamEvent
+    from sentry_sdk.tracing import Span
+
+
+class AnthropicIntegration(Integration):
+    identifier = "anthropic"
+
+    def __init__(self, include_prompts=True):
+        # type: (AnthropicIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        version = package_version("anthropic")
+
+        if version is None:
+            raise DidNotEnable("Unparsable anthropic version.")
+
+        if version < (0, 16):
+            raise DidNotEnable("anthropic 0.16 or newer required.")
+
+        Messages.create = _wrap_message_create(Messages.create)
+
+
+def _capture_exception(exc):
+    # type: (Any) -> None
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "anthropic", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _calculate_token_usage(result, span):
+    # type: (Messages, Span) -> None
+    input_tokens = 0
+    output_tokens = 0
+    if hasattr(result, "usage"):
+        usage = result.usage
+        if hasattr(usage, "input_tokens") and isinstance(usage.input_tokens, int):
+            input_tokens = usage.input_tokens
+        if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int):
+            output_tokens = usage.output_tokens
+
+    total_tokens = input_tokens + output_tokens
+    record_token_usage(span, input_tokens, output_tokens, total_tokens)
+
+
+def _wrap_message_create(f):
+    # type: (Any) -> Any
+    @wraps(f)
+    @ensure_integration_enabled(AnthropicIntegration, f)
+    def _sentry_patched_create(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        if "messages" not in kwargs:
+            return f(*args, **kwargs)
+
+        try:
+            iter(kwargs["messages"])
+        except TypeError:
+            return f(*args, **kwargs)
+
+        messages = list(kwargs["messages"])
+        model = kwargs.get("model")
+
+        span = sentry_sdk.start_span(
+            op=OP.ANTHROPIC_MESSAGES_CREATE, description="Anthropic messages create"
+        )
+        span.__enter__()
+
+        try:
+            result = f(*args, **kwargs)
+        except Exception as exc:
+            _capture_exception(exc)
+            span.__exit__(None, None, None)
+            raise exc from None
+
+        integration = sentry_sdk.get_client().get_integration(AnthropicIntegration)
+
+        with capture_internal_exceptions():
+            span.set_data(SPANDATA.AI_MODEL_ID, model)
+            span.set_data(SPANDATA.AI_STREAMING, False)
+            if should_send_default_pii() and integration.include_prompts:
+                span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages)
+            if hasattr(result, "content"):
+                if should_send_default_pii() and integration.include_prompts:
+                    span.set_data(
+                        SPANDATA.AI_RESPONSES,
+                        list(
+                            map(
+                                lambda message: {
+                                    "type": message.type,
+                                    "text": message.text,
+                                },
+                                result.content,
+                            )
+                        ),
+                    )
+                _calculate_token_usage(result, span)
+                span.__exit__(None, None, None)
+            elif hasattr(result, "_iterator"):
+                old_iterator = result._iterator
+
+                def new_iterator():
+                    # type: () -> Iterator[MessageStreamEvent]
+                    input_tokens = 0
+                    output_tokens = 0
+                    content_blocks = []
+                    with capture_internal_exceptions():
+                        for event in old_iterator:
+                            if hasattr(event, "type"):
+                                if event.type == "message_start":
+                                    usage = event.message.usage
+                                    input_tokens += usage.input_tokens
+                                    output_tokens += usage.output_tokens
+                                elif event.type == "content_block_start":
+                                    pass
+                                elif event.type == "content_block_delta":
+                                    content_blocks.append(event.delta.text)
+                                elif event.type == "content_block_stop":
+                                    pass
+                                elif event.type == "message_delta":
+                                    output_tokens += event.usage.output_tokens
+                                elif event.type == "message_stop":
+                                    continue
+                            yield event
+
+                        if should_send_default_pii() and integration.include_prompts:
+                            complete_message = "".join(content_blocks)
+                            span.set_data(
+                                SPANDATA.AI_RESPONSES,
+                                [{"type": "text", "text": complete_message}],
+                            )
+                        total_tokens = input_tokens + output_tokens
+                        record_token_usage(
+                            span, input_tokens, output_tokens, total_tokens
+                        )
+                        span.set_data(SPANDATA.AI_STREAMING, True)
+                        span.__exit__(None, None, None)
+
+                result._iterator = new_iterator()
+            else:
+                span.set_data("unknown_response", True)
+                span.__exit__(None, None, None)
+
+        return result
+
+    return _sentry_patched_create
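Reviewer note: prompts and responses only reach the span when both gates above pass, i.e. `send_default_pii` is on *and* the integration's `include_prompts` is left enabled. A usage sketch of both knobs:

```python
import sentry_sdk
from sentry_sdk.integrations.anthropic import AnthropicIntegration

sentry_sdk.init(
    dsn="...",
    enable_tracing=True,
    traces_sample_rate=1.0,
    send_default_pii=True,  # required for ai.input_messages / ai.responses
    integrations=[AnthropicIntegration(include_prompts=True)],
)
```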
diff --git a/sentry_sdk/integrations/celery/__init__.py b/sentry_sdk/integrations/celery/__init__.py
index 74205a0184..62fdb1da6f 100644
--- a/sentry_sdk/integrations/celery/__init__.py
+++ b/sentry_sdk/integrations/celery/__init__.py
@@ -30,6 +30,7 @@
     from typing import List
     from typing import Optional
     from typing import TypeVar
+    from typing import Union
 
     from sentry_sdk._types import EventProcessor, Event, Hint, ExcInfo
     from sentry_sdk.tracing import Span
@@ -223,6 +224,16 @@ def _update_celery_task_headers(original_headers, span, monitor_beat_tasks):
     return updated_headers
 
 
+class NoOpMgr:
+    def __enter__(self):
+        # type: () -> None
+        return None
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        # type: (Any, Any, Any) -> None
+        return None
+
+
 def _wrap_apply_async(f):
     # type: (F) -> F
     @wraps(f)
@@ -242,9 +253,17 @@ def apply_async(*args, **kwargs):
 
         task = args[0]
 
-        with sentry_sdk.start_span(
-            op=OP.QUEUE_SUBMIT_CELERY, description=task.name
-        ) as span:
+        task_started_from_beat = (
+            sentry_sdk.Scope.get_isolation_scope()._name == "celery-beat"
+        )
+
+        span_mgr = (
+            sentry_sdk.start_span(op=OP.QUEUE_SUBMIT_CELERY, description=task.name)
+            if not task_started_from_beat
+            else NoOpMgr()
+        )  # type: Union[Span, NoOpMgr]
+
+        with span_mgr as span:
             kwargs["headers"] = _update_celery_task_headers(
                 kwarg_headers, span, integration.monitor_beat_tasks
            )
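Reviewer note: the `NoOpMgr` trick above lets a single `with` statement cover both the traced and the untraced path (Beat-started tasks get no submit span, and `span` comes through as `None`). A standalone sketch of the pattern, assuming a helper named `submit`:

```python
import sentry_sdk


class NoOpMgr:
    def __enter__(self):
        return None

    def __exit__(self, exc_type, exc_value, traceback):
        return None


def submit(task_name, skip_span):
    span_mgr = (
        NoOpMgr()
        if skip_span
        else sentry_sdk.start_span(op="queue.submit.celery", description=task_name)
    )
    with span_mgr as span:
        # span is a Span on the traced path and None on the no-op path;
        # downstream code must tolerate both, as _update_celery_task_headers does.
        pass
```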
diff --git a/sentry_sdk/integrations/django/asgi.py b/sentry_sdk/integrations/django/asgi.py
index b52ca6dd33..e62ce681e7 100644
--- a/sentry_sdk/integrations/django/asgi.py
+++ b/sentry_sdk/integrations/django/asgi.py
@@ -8,6 +8,7 @@
 
 import asyncio
 import functools
+import inspect
 
 from django.core.handlers.wsgi import WSGIRequest
 
@@ -25,14 +26,31 @@
 
 
 if TYPE_CHECKING:
-    from collections.abc import Callable
-    from typing import Any, Union
+    from typing import Any, Callable, Union, TypeVar
 
     from django.core.handlers.asgi import ASGIRequest
     from django.http.response import HttpResponse
 
     from sentry_sdk._types import Event, EventProcessor
 
+    _F = TypeVar("_F", bound=Callable[..., Any])
+
+
+# Python 3.12 deprecates asyncio.iscoroutinefunction() as an alias for
+# inspect.iscoroutinefunction(), whilst also removing the _is_coroutine marker.
+# The latter is replaced with the inspect.markcoroutinefunction decorator.
+# Until 3.12 is the minimum supported Python version, provide a shim.
+# This was copied from https://github.com/django/asgiref/blob/main/asgiref/sync.py
+if hasattr(inspect, "markcoroutinefunction"):
+    iscoroutinefunction = inspect.iscoroutinefunction
+    markcoroutinefunction = inspect.markcoroutinefunction
+else:
+    iscoroutinefunction = asyncio.iscoroutinefunction  # type: ignore[assignment]
+
+    def markcoroutinefunction(func: "_F") -> "_F":
+        func._is_coroutine = asyncio.coroutines._is_coroutine  # type: ignore
+        return func
+
 
 def _make_asgi_request_event_processor(request):
     # type: (ASGIRequest) -> EventProcessor
@@ -181,8 +199,8 @@ def _async_check(self):
         a thread is not consumed during a whole request.
 
         Taken from django.utils.deprecation::MiddlewareMixin._async_check
         """
-        if asyncio.iscoroutinefunction(self.get_response):
-            self._is_coroutine = asyncio.coroutines._is_coroutine  # type: ignore
+        if iscoroutinefunction(self.get_response):
+            markcoroutinefunction(self)
 
     def async_route_check(self):
         # type: () -> bool
@@ -190,7 +208,7 @@ def async_route_check(self):
         Function that checks if we are in async mode,
         and if we are forwards the handling of requests to __acall__
         """
-        return asyncio.iscoroutinefunction(self.get_response)
+        return iscoroutinefunction(self.get_response)
 
     async def __acall__(self, *args, **kwargs):
         # type: (*Any, **Any) -> Any
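Reviewer note: a small, purely illustrative demonstration of what the shim buys on Python 3.12+, where the `_is_coroutine` marker no longer exists. Marking an object whose `__call__` forwards to an async implementation makes `inspect.iscoroutinefunction` report it as a coroutine function, which is exactly what `_async_check` now relies on:

```python
import inspect


class Middleware:
    async def __acall__(self, request):
        return request

    def __call__(self, request):
        # Returns a coroutine, so the instance may be marked as a
        # coroutine function even though __call__ itself is sync.
        return self.__acall__(request)


mw = Middleware()
if hasattr(inspect, "markcoroutinefunction"):  # Python 3.12+
    inspect.markcoroutinefunction(mw)
    assert inspect.iscoroutinefunction(mw)
```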
diff --git a/sentry_sdk/integrations/huggingface_hub.py b/sentry_sdk/integrations/huggingface_hub.py
new file mode 100644
index 0000000000..8e5f0e7339
--- /dev/null
+++ b/sentry_sdk/integrations/huggingface_hub.py
@@ -0,0 +1,173 @@
+from functools import wraps
+
+from sentry_sdk import consts
+from sentry_sdk.ai.monitoring import record_token_usage
+from sentry_sdk.ai.utils import set_data_normalized
+from sentry_sdk.consts import SPANDATA
+
+from typing import Any, Iterable, Callable
+
+import sentry_sdk
+from sentry_sdk.scope import should_send_default_pii
+from sentry_sdk.integrations import DidNotEnable, Integration
+from sentry_sdk.utils import (
+    capture_internal_exceptions,
+    event_from_exception,
+    ensure_integration_enabled,
+)
+
+try:
+    import huggingface_hub.inference._client
+
+    from huggingface_hub import ChatCompletionStreamOutput, TextGenerationOutput
+except ImportError:
+    raise DidNotEnable("Huggingface not installed")
+
+
+class HuggingfaceHubIntegration(Integration):
+    identifier = "huggingface_hub"
+
+    def __init__(self, include_prompts=True):
+        # type: (HuggingfaceHubIntegration, bool) -> None
+        self.include_prompts = include_prompts
+
+    @staticmethod
+    def setup_once():
+        # type: () -> None
+        huggingface_hub.inference._client.InferenceClient.text_generation = (
+            _wrap_text_generation(
+                huggingface_hub.inference._client.InferenceClient.text_generation
+            )
+        )
+
+
+def _capture_exception(exc):
+    # type: (Any) -> None
+    event, hint = event_from_exception(
+        exc,
+        client_options=sentry_sdk.get_client().options,
+        mechanism={"type": "huggingface_hub", "handled": False},
+    )
+    sentry_sdk.capture_event(event, hint=hint)
+
+
+def _wrap_text_generation(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+    @wraps(f)
+    @ensure_integration_enabled(HuggingfaceHubIntegration, f)
+    def new_text_generation(*args, **kwargs):
+        # type: (*Any, **Any) -> Any
+        if "prompt" in kwargs:
+            prompt = kwargs["prompt"]
+        elif len(args) >= 2:
+            kwargs["prompt"] = args[1]
+            prompt = kwargs["prompt"]
+            args = (args[0],) + args[2:]
+        else:
+            # invalid call, let it return error
+            return f(*args, **kwargs)
+
+        model = kwargs.get("model")
+        streaming = kwargs.get("stream")
+
+        span = sentry_sdk.start_span(
+            op=consts.OP.HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE,
+            description="Text Generation",
+        )
+        span.__enter__()
+        try:
+            res = f(*args, **kwargs)
+        except Exception as e:
+            _capture_exception(e)
+            span.__exit__(None, None, None)
+            raise e from None
+
+        integration = sentry_sdk.get_client().get_integration(HuggingfaceHubIntegration)
+
+        with capture_internal_exceptions():
+            if should_send_default_pii() and integration.include_prompts:
+                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, prompt)
+
+            set_data_normalized(span, SPANDATA.AI_MODEL_ID, model)
+            set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)
+
+            if isinstance(res, str):
+                if should_send_default_pii() and integration.include_prompts:
+                    set_data_normalized(
+                        span,
+                        "ai.responses",
+                        [res],
+                    )
+                span.__exit__(None, None, None)
+                return res
+
+            if isinstance(res, TextGenerationOutput):
+                if should_send_default_pii() and integration.include_prompts:
+                    set_data_normalized(
+                        span,
+                        "ai.responses",
+                        [res.generated_text],
+                    )
+                if res.details is not None and res.details.generated_tokens > 0:
+                    record_token_usage(span, total_tokens=res.details.generated_tokens)
+                span.__exit__(None, None, None)
+                return res
+
+            if not isinstance(res, Iterable):
+                # we only know how to deal with strings and iterables, ignore
+                set_data_normalized(span, "unknown_response", True)
+                span.__exit__(None, None, None)
+                return res
+
+            if kwargs.get("details", False):
+                # res is Iterable[TextGenerationStreamOutput]
+                def new_details_iterator():
+                    # type: () -> Iterable[ChatCompletionStreamOutput]
+                    with capture_internal_exceptions():
+                        tokens_used = 0
+                        data_buf: list[str] = []
+                        for x in res:
+                            if hasattr(x, "token") and hasattr(x.token, "text"):
+                                data_buf.append(x.token.text)
+                            if hasattr(x, "details") and hasattr(
+                                x.details, "generated_tokens"
+                            ):
+                                tokens_used = x.details.generated_tokens
+                            yield x
+                        if (
+                            len(data_buf) > 0
+                            and should_send_default_pii()
+                            and integration.include_prompts
+                        ):
+                            set_data_normalized(
+                                span, SPANDATA.AI_RESPONSES, "".join(data_buf)
+                            )
+                        if tokens_used > 0:
+                            record_token_usage(span, total_tokens=tokens_used)
+                        span.__exit__(None, None, None)
+
+                return new_details_iterator()
+            else:
+                # res is Iterable[str]
+
+                def new_iterator():
+                    # type: () -> Iterable[str]
+                    data_buf: list[str] = []
+                    with capture_internal_exceptions():
+                        for s in res:
+                            if isinstance(s, str):
+                                data_buf.append(s)
+                            yield s
+                        if (
+                            len(data_buf) > 0
+                            and should_send_default_pii()
+                            and integration.include_prompts
+                        ):
+                            set_data_normalized(
+                                span, SPANDATA.AI_RESPONSES, "".join(data_buf)
+                            )
+                        span.__exit__(None, None, None)
+
+                return new_iterator()
+
+    return new_text_generation
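Reviewer note: end to end, the wrapper above is exercised like this (the model name is illustrative). Token usage is only recorded when the call reports `details`, since the counts ride along on `details.generated_tokens`:

```python
import sentry_sdk
from huggingface_hub import InferenceClient

sentry_sdk.init(dsn="...", enable_tracing=True, traces_sample_rate=1.0)

client = InferenceClient("some-model")
# Creates an ai.chat_completions.create.huggingface_hub span;
# details=True lets the integration read generated_tokens.
output = client.text_generation("Say hello!", details=True)
```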
streaming langchain calls." + "Please install 'tiktoken' if you aren't receiving accurate token usage in Sentry." + "See https://docs.sentry.io/platforms/python/integrations/langchain/ for more information." + ) + + def count_tokens(s): + # type: (str) -> int + return 1 + + +DATA_FIELDS = { + "temperature": SPANDATA.AI_TEMPERATURE, + "top_p": SPANDATA.AI_TOP_P, + "top_k": SPANDATA.AI_TOP_K, + "function_call": SPANDATA.AI_FUNCTION_CALL, + "tool_calls": SPANDATA.AI_TOOL_CALLS, + "tools": SPANDATA.AI_TOOLS, + "response_format": SPANDATA.AI_RESPONSE_FORMAT, + "logit_bias": SPANDATA.AI_LOGIT_BIAS, + "tags": SPANDATA.AI_TAGS, +} + +# To avoid double collecting tokens, we do *not* measure +# token counts for models for which we have an explicit integration +NO_COLLECT_TOKEN_MODELS = ["openai-chat"] # TODO add huggingface and anthropic + + +class LangchainIntegration(Integration): + identifier = "langchain" + + # The most number of spans (e.g., LLM calls) that can be processed at the same time. + max_spans = 1024 + + def __init__(self, include_prompts=True, max_spans=1024): + # type: (LangchainIntegration, bool, int) -> None + self.include_prompts = include_prompts + self.max_spans = max_spans + + @staticmethod + def setup_once(): + # type: () -> None + manager._configure = _wrap_configure(manager._configure) + + +class WatchedSpan: + span = None # type: Span + num_completion_tokens = 0 # type: int + num_prompt_tokens = 0 # type: int + no_collect_tokens = False # type: bool + children = [] # type: List[WatchedSpan] + is_pipeline = False # type: bool + + def __init__(self, span): + # type: (Span) -> None + self.span = span + + +class SentryLangchainCallback(BaseCallbackHandler): # type: ignore[misc] + """Base callback handler that can be used to handle callbacks from langchain.""" + + span_map = OrderedDict() # type: OrderedDict[UUID, WatchedSpan] + + max_span_map_size = 0 + + def __init__(self, max_span_map_size, include_prompts): + # type: (int, bool) -> None + self.max_span_map_size = max_span_map_size + self.include_prompts = include_prompts + + def gc_span_map(self): + # type: () -> None + + while len(self.span_map) > self.max_span_map_size: + run_id, watched_span = self.span_map.popitem(last=False) + self._exit_span(watched_span, run_id) + + def _handle_error(self, run_id, error): + # type: (UUID, Any) -> None + if not run_id or run_id not in self.span_map: + return + + span_data = self.span_map[run_id] + if not span_data: + return + sentry_sdk.capture_exception(error, span_data.span.scope) + span_data.span.__exit__(None, None, None) + del self.span_map[run_id] + + def _normalize_langchain_message(self, message): + # type: (BaseMessage) -> Any + parsed = {"content": message.content, "role": message.type} + parsed.update(message.additional_kwargs) + return parsed + + def _create_span(self, run_id, parent_id, **kwargs): + # type: (SentryLangchainCallback, UUID, Optional[Any], Any) -> WatchedSpan + + watched_span = None # type: Optional[WatchedSpan] + if parent_id: + parent_span = self.span_map[parent_id] # type: Optional[WatchedSpan] + if parent_span: + watched_span = WatchedSpan(parent_span.span.start_child(**kwargs)) + parent_span.children.append(watched_span) + if watched_span is None: + watched_span = WatchedSpan(sentry_sdk.start_span(**kwargs)) + + if kwargs.get("op", "").startswith("ai.pipeline."): + if kwargs.get("description"): + set_ai_pipeline_name(kwargs.get("description")) + watched_span.is_pipeline = True + + watched_span.span.__enter__() + self.span_map[run_id] = watched_span + 
+        self.gc_span_map()
+        return watched_span
+
+    def _exit_span(self, span_data, run_id):
+        # type: (SentryLangchainCallback, WatchedSpan, UUID) -> None
+
+        if span_data.is_pipeline:
+            set_ai_pipeline_name(None)
+
+        span_data.span.__exit__(None, None, None)
+        del self.span_map[run_id]
+
+    def on_llm_start(
+        self,
+        serialized,
+        prompts,
+        *,
+        run_id,
+        tags=None,
+        parent_run_id=None,
+        metadata=None,
+        **kwargs,
+    ):
+        # type: (SentryLangchainCallback, Dict[str, Any], List[str], UUID, Optional[List[str]], Optional[UUID], Optional[Dict[str, Any]], Any) -> Any
+        """Run when LLM starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+            all_params = kwargs.get("invocation_params", {})
+            all_params.update(serialized.get("kwargs", {}))
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.LANGCHAIN_RUN,
+                description=kwargs.get("name") or "Langchain LLM call",
+            )
+            span = watched_span.span
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, prompts)
+            for k, v in DATA_FIELDS.items():
+                if k in all_params:
+                    set_data_normalized(span, v, all_params[k])
+
+    def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
+        """Run when Chat Model starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+            all_params = kwargs.get("invocation_params", {})
+            all_params.update(serialized.get("kwargs", {}))
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.LANGCHAIN_CHAT_COMPLETIONS_CREATE,
+                description=kwargs.get("name") or "Langchain Chat Model",
+            )
+            span = watched_span.span
+            model = all_params.get(
+                "model", all_params.get("model_name", all_params.get("model_id"))
+            )
+            watched_span.no_collect_tokens = any(
+                x in all_params.get("_type", "") for x in NO_COLLECT_TOKEN_MODELS
+            )
+            if not model and "anthropic" in all_params.get("_type", ""):
+                model = "claude-2"
+            if model:
+                span.set_data(SPANDATA.AI_MODEL_ID, model)
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span,
+                    SPANDATA.AI_INPUT_MESSAGES,
+                    [
+                        [self._normalize_langchain_message(x) for x in list_]
+                        for list_ in messages
+                    ],
+                )
+            for k, v in DATA_FIELDS.items():
+                if k in all_params:
+                    set_data_normalized(span, v, all_params[k])
+            if not watched_span.no_collect_tokens:
+                for list_ in messages:
+                    for message in list_:
+                        self.span_map[run_id].num_prompt_tokens += count_tokens(
+                            message.content
+                        ) + count_tokens(message.type)
Only available when streaming is enabled.""" + with capture_internal_exceptions(): + if not run_id or run_id not in self.span_map: + return + span_data = self.span_map[run_id] + if not span_data or span_data.no_collect_tokens: + return + span_data.num_completion_tokens += count_tokens(token) + + def on_llm_end(self, response, *, run_id, **kwargs): + # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any + """Run when LLM ends running.""" + with capture_internal_exceptions(): + if not run_id: + return + + token_usage = ( + response.llm_output.get("token_usage") if response.llm_output else None + ) + + span_data = self.span_map[run_id] + if not span_data: + return + + if should_send_default_pii() and self.include_prompts: + set_data_normalized( + span_data.span, + SPANDATA.AI_RESPONSES, + [[x.text for x in list_] for list_ in response.generations], + ) + + if not span_data.no_collect_tokens: + if token_usage: + record_token_usage( + span_data.span, + token_usage.get("prompt_tokens"), + token_usage.get("completion_tokens"), + token_usage.get("total_tokens"), + ) + else: + record_token_usage( + span_data.span, + span_data.num_prompt_tokens, + span_data.num_completion_tokens, + ) + + self._exit_span(span_data, run_id) + + def on_llm_error(self, error, *, run_id, **kwargs): + # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any + """Run when LLM errors.""" + with capture_internal_exceptions(): + self._handle_error(run_id, error) + + def on_chain_start(self, serialized, inputs, *, run_id, **kwargs): + # type: (SentryLangchainCallback, Dict[str, Any], Dict[str, Any], UUID, Any) -> Any + """Run when chain starts running.""" + with capture_internal_exceptions(): + if not run_id: + return + watched_span = self._create_span( + run_id, + kwargs.get("parent_run_id"), + op=( + OP.LANGCHAIN_RUN + if kwargs.get("parent_run_id") is not None + else OP.LANGCHAIN_PIPELINE + ), + description=kwargs.get("name") or "Chain execution", + ) + metadata = kwargs.get("metadata") + if metadata: + set_data_normalized(watched_span.span, SPANDATA.AI_METADATA, metadata) + + def on_chain_end(self, outputs, *, run_id, **kwargs): + # type: (SentryLangchainCallback, Dict[str, Any], UUID, Any) -> Any + """Run when chain ends running.""" + with capture_internal_exceptions(): + if not run_id or run_id not in self.span_map: + return + + span_data = self.span_map[run_id] + if not span_data: + return + self._exit_span(span_data, run_id) + + def on_chain_error(self, error, *, run_id, **kwargs): + # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any + """Run when chain errors.""" + self._handle_error(run_id, error) + + def on_agent_action(self, action, *, run_id, **kwargs): + # type: (SentryLangchainCallback, AgentAction, UUID, Any) -> Any + with capture_internal_exceptions(): + if not run_id: + return + watched_span = self._create_span( + run_id, + kwargs.get("parent_run_id"), + op=OP.LANGCHAIN_AGENT, + description=action.tool or "AI tool usage", + ) + if action.tool_input and should_send_default_pii() and self.include_prompts: + set_data_normalized( + watched_span.span, SPANDATA.AI_INPUT_MESSAGES, action.tool_input + ) + + def on_agent_finish(self, finish, *, run_id, **kwargs): + # type: (SentryLangchainCallback, AgentFinish, UUID, Any) -> Any + with capture_internal_exceptions(): + if not run_id: + return + + span_data = self.span_map[run_id] + if not span_data: + return + if should_send_default_pii() and self.include_prompts: + set_data_normalized( + 
+    def on_agent_finish(self, finish, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, AgentFinish, UUID, Any) -> Any
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+
+            span_data = self.span_map[run_id]
+            if not span_data:
+                return
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    span_data.span, SPANDATA.AI_RESPONSES, finish.return_values.items()
+                )
+            self._exit_span(span_data, run_id)
+
+    def on_tool_start(self, serialized, input_str, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Dict[str, Any], str, UUID, Any) -> Any
+        """Run when tool starts running."""
+        with capture_internal_exceptions():
+            if not run_id:
+                return
+            watched_span = self._create_span(
+                run_id,
+                kwargs.get("parent_run_id"),
+                op=OP.LANGCHAIN_TOOL,
+                description=serialized.get("name")
+                or kwargs.get("name")
+                or "AI tool usage",
+            )
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(
+                    watched_span.span,
+                    SPANDATA.AI_INPUT_MESSAGES,
+                    kwargs.get("inputs", [input_str]),
+                )
+                if kwargs.get("metadata"):
+                    set_data_normalized(
+                        watched_span.span, SPANDATA.AI_METADATA, kwargs.get("metadata")
+                    )
+
+    def on_tool_end(self, output, *, run_id, **kwargs):
+        # type: (SentryLangchainCallback, str, UUID, Any) -> Any
+        """Run when tool ends running."""
+        with capture_internal_exceptions():
+            if not run_id or run_id not in self.span_map:
+                return
+
+            span_data = self.span_map[run_id]
+            if not span_data:
+                return
+            if should_send_default_pii() and self.include_prompts:
+                set_data_normalized(span_data.span, SPANDATA.AI_RESPONSES, output)
+            self._exit_span(span_data, run_id)
+
+    def on_tool_error(self, error, *args, run_id, **kwargs):
+        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
+        """Run when tool errors."""
+        self._handle_error(run_id, error)
+
+
+def _wrap_configure(f):
+    # type: (Callable[..., Any]) -> Callable[..., Any]
+
+    @wraps(f)
+    def new_configure(*args, **kwargs):
+        # type: (Any, Any) -> Any
+
+        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
+
+        with capture_internal_exceptions():
+            new_callbacks = []  # type: List[BaseCallbackHandler]
+            if "local_callbacks" in kwargs:
+                existing_callbacks = kwargs["local_callbacks"]
+                kwargs["local_callbacks"] = new_callbacks
+            elif len(args) > 2:
+                existing_callbacks = args[2]
+                args = (
+                    args[0],
+                    args[1],
+                    new_callbacks,
+                ) + args[3:]
+            else:
+                existing_callbacks = []
+
+            if existing_callbacks:
+                if isinstance(existing_callbacks, list):
+                    for cb in existing_callbacks:
+                        new_callbacks.append(cb)
+                elif isinstance(existing_callbacks, BaseCallbackHandler):
+                    new_callbacks.append(existing_callbacks)
+                else:
+                    logger.warning("Unknown callback type: %s", existing_callbacks)
+
+            already_added = False
+            for callback in new_callbacks:
+                if isinstance(callback, SentryLangchainCallback):
+                    already_added = True
+
+            if not already_added:
+                new_callbacks.append(
+                    SentryLangchainCallback(
+                        integration.max_spans, integration.include_prompts
+                    )
+                )
+        return f(*args, **kwargs)
+
+    return new_configure
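Reviewer note: because `manager._configure` is patched above, no manual callback wiring is needed; every chain invocation picks up `SentryLangchainCallback` automatically. A usage sketch in LCEL style, assuming `langchain-openai` is installed (the model and prompt are illustrative):

```python
import sentry_sdk
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

sentry_sdk.init(dsn="...", enable_tracing=True, traces_sample_rate=1.0)

prompt = ChatPromptTemplate.from_messages([("human", "{question}")])
chain = prompt | ChatOpenAI(model="gpt-3.5-turbo-0125")

# Produces an ai.pipeline.langchain span with a nested
# ai.chat_completions.create.langchain child span.
chain.invoke({"question": "What is an AI pipeline?"})
```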
"openai" @@ -77,35 +75,13 @@ def _capture_exception(exc): sentry_sdk.capture_event(event, hint=hint) -def _normalize_data(data): - # type: (Any) -> Any - - # convert pydantic data (e.g. OpenAI v1+) to json compatible format - if hasattr(data, "model_dump"): - try: - return data.model_dump() - except Exception as e: - logger.warning("Could not convert pydantic data to JSON: %s", e) - return data - if isinstance(data, list): - return list(_normalize_data(x) for x in data) - if isinstance(data, dict): - return {k: _normalize_data(v) for (k, v) in data.items()} - return data - - -def set_data_normalized(span, key, value): - # type: (Span, str, Any) -> None - span.set_data(key, _normalize_data(value)) - - def _calculate_chat_completion_usage( messages, response, span, streaming_message_responses=None ): # type: (Iterable[ChatCompletionMessageParam], Any, Span, Optional[List[str]]) -> None - completion_tokens = 0 - prompt_tokens = 0 - total_tokens = 0 + completion_tokens = 0 # type: Optional[int] + prompt_tokens = 0 # type: Optional[int] + total_tokens = 0 # type: Optional[int] if hasattr(response, "usage"): if hasattr(response.usage, "completion_tokens") and isinstance( response.usage.completion_tokens, int @@ -134,15 +110,13 @@ def _calculate_chat_completion_usage( if hasattr(choice, "message"): completion_tokens += count_tokens(choice.message) + if prompt_tokens == 0: + prompt_tokens = None + if completion_tokens == 0: + completion_tokens = None if total_tokens == 0: - total_tokens = prompt_tokens + completion_tokens - - if completion_tokens != 0: - set_data_normalized(span, COMPLETION_TOKENS_USED, completion_tokens) - if prompt_tokens != 0: - set_data_normalized(span, PROMPT_TOKENS_USED, prompt_tokens) - if total_tokens != 0: - set_data_normalized(span, TOTAL_TOKENS_USED, total_tokens) + total_tokens = None + record_token_usage(span, prompt_tokens, completion_tokens, total_tokens) def _wrap_chat_completion_create(f): @@ -167,7 +141,8 @@ def new_chat_completion(*args, **kwargs): streaming = kwargs.get("stream") span = sentry_sdk.start_span( - op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE, description="Chat Completion" + op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE, + description="Chat Completion", ) span.__enter__() try: @@ -181,10 +156,10 @@ def new_chat_completion(*args, **kwargs): with capture_internal_exceptions(): if should_send_default_pii() and integration.include_prompts: - set_data_normalized(span, "ai.input_messages", messages) + set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, messages) - set_data_normalized(span, "ai.model_id", model) - set_data_normalized(span, "ai.streaming", streaming) + set_data_normalized(span, SPANDATA.AI_MODEL_ID, model) + set_data_normalized(span, SPANDATA.AI_STREAMING, streaming) if hasattr(res, "choices"): if should_send_default_pii() and integration.include_prompts: @@ -224,7 +199,9 @@ def new_iterator(): should_send_default_pii() and integration.include_prompts ): - set_data_normalized(span, "ai.responses", all_responses) + set_data_normalized( + span, SPANDATA.AI_RESPONSES, all_responses + ) _calculate_chat_completion_usage( messages, res, span, all_responses ) @@ -285,11 +262,7 @@ def new_embeddings_create(*args, **kwargs): if prompt_tokens == 0: prompt_tokens = count_tokens(kwargs["input"] or "") - if total_tokens == 0: - total_tokens = prompt_tokens - - set_data_normalized(span, PROMPT_TOKENS_USED, prompt_tokens) - set_data_normalized(span, TOTAL_TOKENS_USED, total_tokens) + record_token_usage(span, prompt_tokens, None, total_tokens or 
diff --git a/sentry_sdk/integrations/quart.py b/sentry_sdk/integrations/quart.py
index 7c2f4ade70..3fc34221d0 100644
--- a/sentry_sdk/integrations/quart.py
+++ b/sentry_sdk/integrations/quart.py
@@ -87,9 +87,11 @@ def patch_asgi_app():
     # type: () -> None
     old_app = Quart.__call__
 
-    @ensure_integration_enabled(QuartIntegration, old_app)
     async def sentry_patched_asgi_app(self, scope, receive, send):
         # type: (Any, Any, Any, Any) -> Any
+        if sentry_sdk.get_client().get_integration(QuartIntegration) is None:
+            return await old_app(self, scope, receive, send)
+
         middleware = SentryAsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))
         middleware.__call__ = middleware._run_asgi3
         return await middleware(scope, receive, send)
diff --git a/sentry_sdk/scope.py b/sentry_sdk/scope.py
index 58686d56ef..9cae308e5c 100644
--- a/sentry_sdk/scope.py
+++ b/sentry_sdk/scope.py
@@ -35,7 +35,7 @@
 )
 
 if TYPE_CHECKING:
-    from collections.abc import MutableMapping
+    from collections.abc import Mapping, MutableMapping
 
     from typing import Any
     from typing import Callable
@@ -799,6 +799,25 @@ def set_tag(self, key, value):
         """
         self._tags[key] = value
 
+    def set_tags(self, tags):
+        # type: (Mapping[str, object]) -> None
+        """Sets multiple tags at once.
+
+        This method updates multiple tags at once. The tags are passed as a dictionary
+        or other mapping type.
+
+        Calling this method is equivalent to calling `set_tag` on each key-value pair
+        in the mapping. If a tag key already exists in the scope, its value will be
+        updated. If the tag key does not exist in the scope, the key-value pair will
+        be added to the scope.
+
+        This method only modifies tag keys in the `tags` mapping passed to the method.
+        `scope.set_tags({})` is, therefore, a no-op.
+
+        :param tags: A mapping of tag keys to tag values to set.
+        """
+        self._tags.update(tags)
+
     def remove_tag(self, key):
         # type: (str) -> None
         """
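Reviewer note: the new top-level API wired up in `sentry_sdk/api.py` forwards to this method, so callers can batch tag updates (tag names below are illustrative):

```python
import sentry_sdk

sentry_sdk.init(dsn="...")

# Equivalent to three consecutive set_tag calls; existing keys are overwritten.
sentry_sdk.set_tags(
    {
        "page.locale": "de-at",
        "page.type": "article",
        "release.stage": "canary",
    }
)
```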
+ """ + self._tags.update(tags) + def remove_tag(self, key): # type: (str) -> None """ diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py index 6e82d839db..36aab2896c 100644 --- a/sentry_sdk/tracing.py +++ b/sentry_sdk/tracing.py @@ -12,7 +12,6 @@ ) from sentry_sdk._types import TYPE_CHECKING - if TYPE_CHECKING: from collections.abc import Callable, Mapping, MutableMapping from typing import Any @@ -32,7 +31,12 @@ R = TypeVar("R") import sentry_sdk.profiler - from sentry_sdk._types import Event, MeasurementUnit, SamplingContext + from sentry_sdk._types import ( + Event, + MeasurementUnit, + SamplingContext, + MeasurementValue, + ) class SpanKwargs(TypedDict, total=False): trace_id: str @@ -189,6 +193,7 @@ class Span: "sampled", "op", "description", + "_measurements", "start_timestamp", "_start_timestamp_monotonic_ns", "status", @@ -229,6 +234,7 @@ def __init__( self.status = status self.hub = hub self.scope = scope + self._measurements = {} # type: Dict[str, MeasurementValue] self._tags = {} # type: MutableMapping[str, str] self._data = {} # type: Dict[str, Any] self._containing_transaction = containing_transaction @@ -488,6 +494,10 @@ def set_status(self, value): # type: (str) -> None self.status = value + def set_measurement(self, name, value, unit=""): + # type: (str, float, MeasurementUnit) -> None + self._measurements[name] = {"value": value, "unit": unit} + def set_thread(self, thread_id, thread_name): # type: (Optional[int], Optional[str]) -> None @@ -598,6 +608,9 @@ def to_json(self): if metrics_summary: rv["_metrics_summary"] = metrics_summary + if len(self._measurements) > 0: + rv["measurements"] = self._measurements + tags = self._tags if tags: rv["tags"] = tags @@ -674,7 +687,7 @@ def __init__( self.source = source self.sample_rate = None # type: Optional[float] self.parent_sampled = parent_sampled - self._measurements = {} # type: Dict[str, Any] + self._measurements = {} # type: Dict[str, MeasurementValue] self._contexts = {} # type: Dict[str, Any] self._profile = None # type: Optional[sentry_sdk.profiler.Profile] self._baggage = baggage diff --git a/sentry_sdk/tracing_utils.py b/sentry_sdk/tracing_utils.py index 556a466c0b..fac51f4848 100644 --- a/sentry_sdk/tracing_utils.py +++ b/sentry_sdk/tracing_utils.py @@ -421,6 +421,16 @@ def update(self, other_dict): except AttributeError: pass + def __repr__(self): + # type: (...) 
+        self._measurements[name] = {"value": value, "unit": unit}
+
     def set_thread(self, thread_id, thread_name):
         # type: (Optional[int], Optional[str]) -> None

@@ -598,6 +608,9 @@ def to_json(self):
         if metrics_summary:
             rv["_metrics_summary"] = metrics_summary

+        if len(self._measurements) > 0:
+            rv["measurements"] = self._measurements
+
         tags = self._tags
         if tags:
             rv["tags"] = tags
@@ -674,7 +687,7 @@ def __init__(
         self.source = source
         self.sample_rate = None  # type: Optional[float]
         self.parent_sampled = parent_sampled
-        self._measurements = {}  # type: Dict[str, Any]
+        self._measurements = {}  # type: Dict[str, MeasurementValue]
         self._contexts = {}  # type: Dict[str, Any]
         self._profile = None  # type: Optional[sentry_sdk.profiler.Profile]
         self._baggage = baggage
diff --git a/sentry_sdk/tracing_utils.py b/sentry_sdk/tracing_utils.py
index 556a466c0b..fac51f4848 100644
--- a/sentry_sdk/tracing_utils.py
+++ b/sentry_sdk/tracing_utils.py
@@ -421,6 +421,16 @@ def update(self, other_dict):
         except AttributeError:
             pass

+    def __repr__(self):
+        # type: (...) -> str
+        return "<PropagationContext _trace_id={} _span_id={} parent_span_id={} parent_sampled={} dynamic_sampling_context={}>".format(
+            self._trace_id,
+            self._span_id,
+            self.parent_span_id,
+            self.parent_sampled,
+            self.dynamic_sampling_context,
+        )
+

 class Baggage:
     """
diff --git a/setup.py b/setup.py
index 037a621ddf..6a6917fbe0 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ def get_file_text(file_name):

 setup(
     name="sentry-sdk",
-    version="2.0.1",
+    version="2.1.1",
     author="Sentry Team and Contributors",
     author_email="hello@sentry.io",
     url="https://github.com/getsentry/sentry-python",
@@ -44,6 +44,7 @@ def get_file_text(file_name):
     ],
     extras_require={
         "aiohttp": ["aiohttp>=3.5"],
+        "anthropic": ["anthropic>=0.16"],
         "arq": ["arq>=0.23"],
         "asyncpg": ["asyncpg>=0.23"],
         "beam": ["apache-beam>=2.12"],
@@ -59,6 +60,8 @@ def get_file_text(file_name):
         "grpcio": ["grpcio>=1.21.1"],
         "httpx": ["httpx>=0.16.0"],
         "huey": ["huey>=2"],
+        "huggingface_hub": ["huggingface_hub>=0.22"],
+        "langchain": ["langchain>=0.0.210"],
         "loguru": ["loguru>=0.5"],
         "openai": ["openai>=1.0.0", "tiktoken>=0.3.0"],
         "opentelemetry": ["opentelemetry-distro>=0.35b0"],
diff --git a/tests/integrations/anthropic/__init__.py b/tests/integrations/anthropic/__init__.py
new file mode 100644
index 0000000000..29ac4e6ff4
--- /dev/null
+++ b/tests/integrations/anthropic/__init__.py
@@ -0,0 +1,3 @@
+import pytest
+
+pytest.importorskip("anthropic")
diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py
new file mode 100644
index 0000000000..10424771b6
--- /dev/null
+++ b/tests/integrations/anthropic/test_anthropic.py
@@ -0,0 +1,210 @@
+import pytest
+from unittest import mock
+from anthropic import Anthropic, Stream, AnthropicError
+from anthropic.types import Usage, ContentBlock, MessageDeltaUsage, TextDelta
+from anthropic.types.message import Message
+from anthropic.types.message_start_event import MessageStartEvent
+from anthropic.types.content_block_start_event import ContentBlockStartEvent
+from anthropic.types.content_block_delta_event import ContentBlockDeltaEvent
+from anthropic.types.content_block_stop_event import ContentBlockStopEvent
+from anthropic.types.message_delta_event import MessageDeltaEvent, Delta
+
+from sentry_sdk import start_transaction
+from sentry_sdk.consts import OP, SPANDATA
+from sentry_sdk.integrations.anthropic import AnthropicIntegration
+
+
+EXAMPLE_MESSAGE = Message(
+    id="id",
+    model="model",
+    role="assistant",
+    content=[ContentBlock(type="text", text="Hi, I'm Claude.")],
+    type="message",
+    usage=Usage(input_tokens=10, output_tokens=20),
+)
+
+
+@pytest.mark.parametrize(
+    "send_default_pii, include_prompts",
+    [
+        (True, True),
+        (True, False),
+        (False, True),
+        (False, False),
+    ],
+)
+def test_nonstreaming_create_message(
+    sentry_init, capture_events, send_default_pii, include_prompts
+):
+    sentry_init(
+        integrations=[AnthropicIntegration(include_prompts=include_prompts)],
+        traces_sample_rate=1.0,
+        send_default_pii=send_default_pii,
+    )
+    events = capture_events()
+    client = Anthropic(api_key="z")
+    client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE)
+
+    messages = [
+        {
+            "role": "user",
+            "content": "Hello, Claude",
+        }
+    ]
+
+    with start_transaction(name="anthropic"):
+        response = client.messages.create(
+            max_tokens=1024, messages=messages, model="model"
+        )
+
+    assert response == EXAMPLE_MESSAGE
+    usage = response.usage
+
+    assert usage.input_tokens == 10
+    assert usage.output_tokens == 20
+
+    assert len(events) == 1
+    (event,) = events
+
+    assert event["type"] == "transaction"
+    assert event["transaction"] == "anthropic"
+
+    assert len(event["spans"]) == 1
+    (span,) = event["spans"]
+
+    assert span["op"] == OP.ANTHROPIC_MESSAGES_CREATE
+    assert span["description"] == "Anthropic messages create"
+    assert span["data"][SPANDATA.AI_MODEL_ID] == "model"
+
+    if send_default_pii and include_prompts:
+        assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages
+        assert span["data"][SPANDATA.AI_RESPONSES] == [
+            {"type": "text", "text": "Hi, I'm Claude."}
+        ]
+    else:
+        assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+        assert SPANDATA.AI_RESPONSES not in span["data"]
+
+    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
+    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
+    assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
+    assert span["data"]["ai.streaming"] is False
+
+
+@pytest.mark.parametrize(
+    "send_default_pii, include_prompts",
+    [
+        (True, True),
+        (True, False),
+        (False, True),
+        (False, False),
+    ],
+)
+def test_streaming_create_message(
+    sentry_init, capture_events, send_default_pii, include_prompts
+):
+    client = Anthropic(api_key="z")
+    returned_stream = Stream(cast_to=None, response=None, client=client)
+    returned_stream._iterator = [
+        MessageStartEvent(
+            message=EXAMPLE_MESSAGE,
+            type="message_start",
+        ),
+        ContentBlockStartEvent(
+            type="content_block_start",
+            index=0,
+            content_block=ContentBlock(type="text", text=""),
+        ),
+        ContentBlockDeltaEvent(
+            delta=TextDelta(text="Hi", type="text_delta"),
+            index=0,
+            type="content_block_delta",
+        ),
+        ContentBlockDeltaEvent(
+            delta=TextDelta(text="!", type="text_delta"),
+            index=0,
+            type="content_block_delta",
+        ),
+        ContentBlockDeltaEvent(
+            delta=TextDelta(text=" I'm Claude!", type="text_delta"),
+            index=0,
+            type="content_block_delta",
+        ),
+        ContentBlockStopEvent(type="content_block_stop", index=0),
+        MessageDeltaEvent(
+            delta=Delta(),
+            usage=MessageDeltaUsage(output_tokens=10),
+            type="message_delta",
+        ),
+    ]
+
+    sentry_init(
+        integrations=[AnthropicIntegration(include_prompts=include_prompts)],
+        traces_sample_rate=1.0,
+        send_default_pii=send_default_pii,
+    )
+    events = capture_events()
+    client.messages._post = mock.Mock(return_value=returned_stream)
+
+    messages = [
+        {
+            "role": "user",
+            "content": "Hello, Claude",
+        }
+    ]
+
+    with start_transaction(name="anthropic"):
+        message = client.messages.create(
+            max_tokens=1024, messages=messages, model="model", stream=True
+        )
+
+        for _ in message:
+            pass
+
+    assert message == returned_stream
+    assert len(events) == 1
+    (event,) = events
+
+    assert event["type"] == "transaction"
+    assert event["transaction"] == "anthropic"
+
+    assert len(event["spans"]) == 1
+    (span,) = event["spans"]
+
+    assert span["op"] == OP.ANTHROPIC_MESSAGES_CREATE
+    assert span["description"] == "Anthropic messages create"
+    assert span["data"][SPANDATA.AI_MODEL_ID] == "model"
+
+    if send_default_pii and include_prompts:
+        assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages
+        assert span["data"][SPANDATA.AI_RESPONSES] == [
+            {"type": "text", "text": "Hi! I'm Claude!"}
+        ]
+
+    else:
+        assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+        assert SPANDATA.AI_RESPONSES not in span["data"]
+
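+    # Token accounting: the 10 prompt tokens come from the message_start
+    # event's usage; completion tokens are 20 from that initial message plus
+    # 10 more from the message_delta event, i.e. 30 completion and 40 total.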
+    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
+    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
+    assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
+    assert span["data"]["ai.streaming"] is True
+
+
+def test_exception_message_create(sentry_init, capture_events):
+    sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0)
+    events = capture_events()
+
+    client = Anthropic(api_key="z")
+    client.messages._post = mock.Mock(
+        side_effect=AnthropicError("API rate limit reached")
+    )
+    with pytest.raises(AnthropicError):
+        client.messages.create(
+            model="some-model",
+            messages=[{"role": "system", "content": "I'm throwing an exception"}],
+            max_tokens=1024,
+        )
+
+    (event,) = events
+    assert event["level"] == "error"
diff --git a/tests/integrations/django/asgi/test_asgi.py b/tests/integrations/django/asgi/test_asgi.py
index fd266c4fae..47e333cc37 100644
--- a/tests/integrations/django/asgi/test_asgi.py
+++ b/tests/integrations/django/asgi/test_asgi.py
@@ -1,5 +1,8 @@
 import base64
+import sys
 import json
+import inspect
+import asyncio
 import os

 from unittest import mock
@@ -8,6 +11,7 @@
 from channels.testing import HttpCommunicator
 from sentry_sdk import capture_message
 from sentry_sdk.integrations.django import DjangoIntegration
+from sentry_sdk.integrations.django.asgi import _asgi_middleware_mixin_factory
 from tests.integrations.django.myapp.asgi import channels_application

 try:
@@ -526,3 +530,65 @@ async def test_asgi_request_body(
         assert event["request"]["data"] == expected_data
     else:
         assert "data" not in event["request"]
+
+
+@pytest.mark.asyncio
+@pytest.mark.skipif(
+    sys.version_info >= (3, 12),
+    reason=(
+        "asyncio.iscoroutinefunction has been replaced in 3.12 by inspect.iscoroutinefunction"
+    ),
+)
+async def test_asgi_mixin_iscoroutinefunction_before_3_12():
+    sentry_asgi_mixin = _asgi_middleware_mixin_factory(lambda: None)
+
+    async def get_response(): ...
+
+    instance = sentry_asgi_mixin(get_response)
+    assert asyncio.iscoroutinefunction(instance)
+
+
+@pytest.mark.skipif(
+    sys.version_info >= (3, 12),
+    reason=(
+        "asyncio.iscoroutinefunction has been replaced in 3.12 by inspect.iscoroutinefunction"
+    ),
+)
+def test_asgi_mixin_iscoroutinefunction_when_not_async_before_3_12():
+    sentry_asgi_mixin = _asgi_middleware_mixin_factory(lambda: None)
+
+    def get_response(): ...
+
+    instance = sentry_asgi_mixin(get_response)
+    assert not asyncio.iscoroutinefunction(instance)
+
+
+@pytest.mark.asyncio
+@pytest.mark.skipif(
+    sys.version_info < (3, 12),
+    reason=(
+        "asyncio.iscoroutinefunction has been replaced in 3.12 by inspect.iscoroutinefunction"
+    ),
+)
+async def test_asgi_mixin_iscoroutinefunction_after_3_12():
+    sentry_asgi_mixin = _asgi_middleware_mixin_factory(lambda: None)
+
+    async def get_response(): ...
+
+    instance = sentry_asgi_mixin(get_response)
+    assert inspect.iscoroutinefunction(instance)
+
+
+@pytest.mark.skipif(
+    sys.version_info < (3, 12),
+    reason=(
+        "asyncio.iscoroutinefunction has been replaced in 3.12 by inspect.iscoroutinefunction"
+    ),
+)
+def test_asgi_mixin_iscoroutinefunction_when_not_async_after_3_12():
+    sentry_asgi_mixin = _asgi_middleware_mixin_factory(lambda: None)
+
+    def get_response(): ...
+
+    instance = sentry_asgi_mixin(get_response)
+    assert not inspect.iscoroutinefunction(instance)
diff --git a/tests/integrations/huggingface_hub/__init__.py b/tests/integrations/huggingface_hub/__init__.py
new file mode 100644
index 0000000000..fe1fa0af50
--- /dev/null
+++ b/tests/integrations/huggingface_hub/__init__.py
@@ -0,0 +1,3 @@
+import pytest
+
+pytest.importorskip("huggingface_hub")
diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py
new file mode 100644
index 0000000000..734778d08a
--- /dev/null
+++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py
@@ -0,0 +1,139 @@
+import itertools
+
+import pytest
+from huggingface_hub import (
+    InferenceClient,
+)
+from huggingface_hub.errors import OverloadedError
+
+from sentry_sdk import start_transaction
+from sentry_sdk.integrations.huggingface_hub import HuggingfaceHubIntegration
+
+from unittest import mock  # python 3.3 and above
+
+
+@pytest.mark.parametrize(
+    "send_default_pii, include_prompts, details_arg",
+    itertools.product([True, False], repeat=3),
+)
+def test_nonstreaming_chat_completion(
+    sentry_init, capture_events, send_default_pii, include_prompts, details_arg
+):
+    sentry_init(
+        integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)],
+        traces_sample_rate=1.0,
+        send_default_pii=send_default_pii,
+    )
+    events = capture_events()
+
+    client = InferenceClient("some-model")
+    if details_arg:
+        client.post = mock.Mock(
+            return_value=b"""[{
+                "generated_text": "the model response",
+                "details": {
+                    "finish_reason": "length",
+                    "generated_tokens": 10,
+                    "prefill": [],
+                    "tokens": []
+                }
+            }]"""
+        )
+    else:
+        client.post = mock.Mock(
+            return_value=b'[{"generated_text": "the model response"}]'
+        )
+    with start_transaction(name="huggingface_hub tx"):
+        response = client.text_generation(
+            prompt="hello",
+            details=details_arg,
+            stream=False,
+        )
+    if details_arg:
+        assert response.generated_text == "the model response"
+    else:
+        assert response == "the model response"
+    tx = events[0]
+    assert tx["type"] == "transaction"
+    span = tx["spans"][0]
+    assert span["op"] == "ai.chat_completions.create.huggingface_hub"
+
+    if send_default_pii and include_prompts:
+        assert "hello" in span["data"]["ai.input_messages"]
+        assert "the model response" in span["data"]["ai.responses"]
+    else:
+        assert "ai.input_messages" not in span["data"]
+        assert "ai.responses" not in span["data"]
+
+    if details_arg:
+        assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
+
+
+@pytest.mark.parametrize(
+    "send_default_pii, include_prompts, details_arg",
+    itertools.product([True, False], repeat=3),
+)
+def test_streaming_chat_completion(
+    sentry_init, capture_events, send_default_pii, include_prompts, details_arg
+):
+    sentry_init(
+        integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)],
+        traces_sample_rate=1.0,
+        send_default_pii=send_default_pii,
+    )
+    events = capture_events()
+
+    client = InferenceClient("some-model")
+    client.post = mock.Mock(
+        return_value=[
+            b"""data:{
+                "token":{"id":1, "special": false, "text": "the model "}
+            }""",
+            b"""data:{
+                "token":{"id":2, "special": false, "text": "response"},
+                "details":{"finish_reason": "length", "generated_tokens": 10, "seed": 0}
+            }""",
+        ]
+    )
+    with start_transaction(name="huggingface_hub tx"):
+        response = list(
+            client.text_generation(
+                prompt="hello",
+                details=details_arg,
+                stream=True,
+            )
+        )
+    assert len(response) == 2
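+    # With details=True, text_generation yields TextGenerationStreamOutput
+    # objects (the token text lives under .token.text); without details it
+    # yields the plain string chunks.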
+    if details_arg:
+        assert response[0].token.text + response[1].token.text == "the model response"
+    else:
+        assert response[0] + response[1] == "the model response"
+
+    tx = events[0]
+    assert tx["type"] == "transaction"
+    span = tx["spans"][0]
+    assert span["op"] == "ai.chat_completions.create.huggingface_hub"
+
+    if send_default_pii and include_prompts:
+        assert "hello" in span["data"]["ai.input_messages"]
+        assert "the model response" in span["data"]["ai.responses"]
+    else:
+        assert "ai.input_messages" not in span["data"]
+        assert "ai.responses" not in span["data"]
+
+    if details_arg:
+        assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
+
+
+def test_bad_chat_completion(sentry_init, capture_events):
+    sentry_init(integrations=[HuggingfaceHubIntegration()], traces_sample_rate=1.0)
+    events = capture_events()
+
+    client = InferenceClient("some-model")
+    client.post = mock.Mock(side_effect=OverloadedError("The server is overloaded"))
+    with pytest.raises(OverloadedError):
+        client.text_generation(prompt="hello")
+
+    (event,) = events
+    assert event["level"] == "error"
diff --git a/tests/integrations/langchain/__init__.py b/tests/integrations/langchain/__init__.py
new file mode 100644
index 0000000000..a286454a56
--- /dev/null
+++ b/tests/integrations/langchain/__init__.py
@@ -0,0 +1,3 @@
+import pytest
+
+pytest.importorskip("langchain_core")
diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py
new file mode 100644
index 0000000000..6498cefbaf
--- /dev/null
+++ b/tests/integrations/langchain/test_langchain.py
@@ -0,0 +1,223 @@
+from typing import List, Optional, Any, Iterator
+from unittest.mock import Mock
+
+import pytest
+from langchain_community.chat_models import ChatOpenAI
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.messages import BaseMessage, AIMessageChunk
+from langchain_core.outputs import ChatGenerationChunk
+
+from sentry_sdk import start_transaction
+from sentry_sdk.integrations.langchain import LangchainIntegration
+from langchain.agents import tool, AgentExecutor, create_openai_tools_agent
+from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
+
+
+@tool
+def get_word_length(word: str) -> int:
+    """Returns the length of a word."""
+    return len(word)
+
+
+global stream_result_mock  # type: Mock
+global llm_type  # type: str
+
+
+class MockOpenAI(ChatOpenAI):
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[ChatGenerationChunk]:
+        for x in stream_result_mock():
+            yield x
+
+    @property
+    def _llm_type(self) -> str:
+        return llm_type
+
+
+@pytest.mark.parametrize(
+    "send_default_pii, include_prompts, use_unknown_llm_type",
+    [
+        (True, True, False),
+        (True, False, False),
+        (False, True, False),
+        (False, False, True),
+    ],
+)
+def test_langchain_agent(
+    sentry_init, capture_events, send_default_pii, include_prompts, use_unknown_llm_type
+):
+    global llm_type
+    llm_type = "acme-llm" if use_unknown_llm_type else "openai-chat"
+
+    sentry_init(
+        integrations=[LangchainIntegration(include_prompts=include_prompts)],
+        traces_sample_rate=1.0,
+        send_default_pii=send_default_pii,
+    )
+    events = capture_events()
+
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            (
+                "system",
+                "You are very powerful assistant, but don't know current events",
+            ),
+            ("user", "{input}"),
+            MessagesPlaceholder(variable_name="agent_scratchpad"),
+        ]
+    )
+    global stream_result_mock
+    stream_result_mock = Mock(
+        side_effect=[
+            [
+                ChatGenerationChunk(
+                    type="ChatGenerationChunk",
+                    message=AIMessageChunk(
+                        content="",
+                        additional_kwargs={
+                            "tool_calls": [
+                                {
+                                    "index": 0,
+                                    "id": "call_BbeyNhCKa6kYLYzrD40NGm3b",
+                                    "function": {
+                                        "arguments": "",
+                                        "name": "get_word_length",
+                                    },
+                                    "type": "function",
+                                }
+                            ]
+                        },
+                    ),
+                ),
+                ChatGenerationChunk(
+                    type="ChatGenerationChunk",
+                    message=AIMessageChunk(
+                        content="",
+                        additional_kwargs={
+                            "tool_calls": [
+                                {
+                                    "index": 0,
+                                    "id": None,
+                                    "function": {
+                                        "arguments": '{"word": "eudca"}',
+                                        "name": None,
+                                    },
+                                    "type": None,
+                                }
+                            ]
+                        },
+                    ),
+                ),
+                ChatGenerationChunk(
+                    type="ChatGenerationChunk",
+                    message=AIMessageChunk(content="5"),
+                    generation_info={"finish_reason": "function_call"},
+                ),
+            ],
+            [
+                ChatGenerationChunk(
+                    text="The word eudca has 5 letters.",
+                    type="ChatGenerationChunk",
+                    message=AIMessageChunk(content="The word eudca has 5 letters."),
+                ),
+                ChatGenerationChunk(
+                    type="ChatGenerationChunk",
+                    generation_info={"finish_reason": "stop"},
+                    message=AIMessageChunk(content=""),
+                ),
+            ],
+        ]
+    )
+    llm = MockOpenAI(
+        model_name="gpt-3.5-turbo",
+        temperature=0,
+        openai_api_key="badkey",
+    )
+    agent = create_openai_tools_agent(llm, [get_word_length], prompt)
+
+    agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True)
+
+    with start_transaction():
+        list(agent_executor.stream({"input": "How many letters in the word eudca"}))
+
+    tx = events[0]
+    assert tx["type"] == "transaction"
+    chat_spans = list(
+        x for x in tx["spans"] if x["op"] == "ai.chat_completions.create.langchain"
+    )
+    tool_exec_span = next(x for x in tx["spans"] if x["op"] == "ai.tool.langchain")
+
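+    # the mocked stream above feeds two model invocations (tool selection,
+    # then the final answer), so exactly two chat spans are expected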
+    assert len(chat_spans) == 2
+
+    # We can't guarantee anything about the "shape" of the langchain execution graph
+    assert len(list(x for x in tx["spans"] if x["op"] == "ai.run.langchain")) > 0
+
+    if use_unknown_llm_type:
+        assert "ai_prompt_tokens_used" in chat_spans[0]["measurements"]
+        assert "ai_total_tokens_used" in chat_spans[0]["measurements"]
+    else:
+        # important: to avoid double counting, we do *not* measure
+        # tokens used if we have an explicit integration (e.g. OpenAI)
+        assert "measurements" not in chat_spans[0]
+
+    if send_default_pii and include_prompts:
+        assert (
+            "You are very powerful"
+            in chat_spans[0]["data"]["ai.input_messages"][0]["content"]
+        )
+        assert "5" in chat_spans[0]["data"]["ai.responses"]
+        assert "word" in tool_exec_span["data"]["ai.input_messages"]
+        assert 5 == int(tool_exec_span["data"]["ai.responses"])
+        assert (
+            "You are very powerful"
+            in chat_spans[1]["data"]["ai.input_messages"][0]["content"]
+        )
+        assert "5" in chat_spans[1]["data"]["ai.responses"]
+    else:
+        assert "ai.input_messages" not in chat_spans[0].get("data", {})
+        assert "ai.responses" not in chat_spans[0].get("data", {})
+        assert "ai.input_messages" not in chat_spans[1].get("data", {})
+        assert "ai.responses" not in chat_spans[1].get("data", {})
+        assert "ai.input_messages" not in tool_exec_span.get("data", {})
+        assert "ai.responses" not in tool_exec_span.get("data", {})
+
+
+def test_langchain_error(sentry_init, capture_events):
+    sentry_init(
+        integrations=[LangchainIntegration(include_prompts=True)],
+        traces_sample_rate=1.0,
+        send_default_pii=True,
+    )
+    events = capture_events()
+
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            (
+                "system",
+                "You are very powerful assistant, but don't know current events",
+            ),
+            ("user", "{input}"),
+            MessagesPlaceholder(variable_name="agent_scratchpad"),
+        ]
+    )
+    global stream_result_mock
+    stream_result_mock = Mock(side_effect=Exception("API rate limit error"))
+    llm = MockOpenAI(
+        model_name="gpt-3.5-turbo",
+        temperature=0,
+        openai_api_key="badkey",
+    )
+    agent = create_openai_tools_agent(llm, [get_word_length], prompt)
+
+    agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True)
+
+    with start_transaction(), pytest.raises(Exception):
+        list(agent_executor.stream({"input": "How many letters in the word eudca"}))
+
+    error = events[0]
+    assert error["level"] == "error"
diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py
index 074d859274..f14ae82333 100644
--- a/tests/integrations/openai/test_openai.py
+++ b/tests/integrations/openai/test_openai.py
@@ -7,12 +7,7 @@
 from openai.types.create_embedding_response import Usage as EmbeddingTokenUsage

 from sentry_sdk import start_transaction
-from sentry_sdk.integrations.openai import (
-    OpenAIIntegration,
-    COMPLETION_TOKENS_USED,
-    PROMPT_TOKENS_USED,
-    TOTAL_TOKENS_USED,
-)
+from sentry_sdk.integrations.openai import OpenAIIntegration

 from unittest import mock  # python 3.3 and above

@@ -72,15 +67,15 @@ def test_nonstreaming_chat_completion(
     assert span["op"] == "ai.chat_completions.create.openai"

     if send_default_pii and include_prompts:
-        assert "hello" in span["data"]["ai.input_messages"][0]["content"]
-        assert "the model response" in span["data"]["ai.responses"][0]["content"]
+        assert "hello" in span["data"]["ai.input_messages"]["content"]
+        assert "the model response" in span["data"]["ai.responses"]["content"]
     else:
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]

-    assert span["data"][COMPLETION_TOKENS_USED] == 10
-    assert span["data"][PROMPT_TOKENS_USED] == 20
-    assert span["data"][TOTAL_TOKENS_USED] == 30
+    assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
+    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
+    assert span["measurements"]["ai_total_tokens_used"]["value"] == 30


 # noinspection PyTypeChecker
@@ -151,8 +146,8 @@ def test_streaming_chat_completion(
     assert span["op"] == "ai.chat_completions.create.openai"

     if send_default_pii and include_prompts:
-        assert "hello" in span["data"]["ai.input_messages"][0]["content"]
-        assert "hello world" in span["data"]["ai.responses"][0]
+        assert "hello" in span["data"]["ai.input_messages"]["content"]
+        assert "hello world" in span["data"]["ai.responses"]
     else:
         assert "ai.input_messages" not in span["data"]
         assert "ai.responses" not in span["data"]
@@ -160,9 +155,9 @@ def test_streaming_chat_completion(
     try:
         import tiktoken  # type: ignore # noqa # pylint: disable=unused-import

-        assert span["data"][COMPLETION_TOKENS_USED] == 2
-        assert span["data"][PROMPT_TOKENS_USED] == 1
-        assert span["data"][TOTAL_TOKENS_USED] == 3
+        assert span["measurements"]["ai_completion_tokens_used"]["value"] == 2
+        assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 1
+        assert span["measurements"]["ai_total_tokens_used"]["value"] == 3
     except ImportError:
         pass  # if tiktoken is not installed, we can't guarantee token usage will be calculated properly
@@ -223,9 +218,9 @@ def test_embeddings_create(
     span = tx["spans"][0]
     assert span["op"] == "ai.embeddings.create.openai"
     if send_default_pii and include_prompts:
-        assert "hello" in span["data"]["ai.input_messages"][0]
+        assert "hello" in span["data"]["ai.input_messages"]
     else:
         assert "ai.input_messages" not in span["data"]

-    assert span["data"][PROMPT_TOKENS_USED] == 20
-    assert span["data"][TOTAL_TOKENS_USED] == 30
+    assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
+    assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
diff --git a/tests/integrations/trytond/test_trytond.py b/tests/integrations/trytond/test_trytond.py
index 870b6ccf96..f4ae81f3fa 100644
--- a/tests/integrations/trytond/test_trytond.py
+++ b/tests/integrations/trytond/test_trytond.py
@@ -11,7 +11,9 @@
 from trytond.wsgi import app as trytond_app

 from werkzeug.test import Client
+
 from sentry_sdk.integrations.trytond import TrytondWSGIIntegration
+from tests.conftest import unpack_werkzeug_response


 @pytest.fixture(scope="function")
@@ -118,8 +120,8 @@ def _(app, request, e):
         "/rpcerror", content_type="application/json", data=json.dumps(_data)
     )

-    (content, status, headers) = response
-    data = json.loads(next(content))
+    (content, status, headers) = unpack_werkzeug_response(response)
+    data = json.loads(content)
     assert status == "200 OK"
     assert headers.get("Content-Type") == "application/json"
     assert data == dict(id=42, error=["UserError", ["Sentry error.", "foo", None]])
diff --git a/tests/test_api.py b/tests/test_api.py
index 738882f965..a6c44260d7 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -2,6 +2,7 @@
 from unittest import mock

 from sentry_sdk import (
+    capture_exception,
     continue_trace,
     get_baggage,
     get_client,
@@ -9,6 +10,7 @@
     get_traceparent,
     is_initialized,
     start_transaction,
+    set_tags,
 )
 from sentry_sdk.client import Client, NonRecordingClient

@@ -135,3 +137,45 @@ def test_get_client():
     assert client is not None
     assert client.__class__ == NonRecordingClient
     assert not client.is_active()
+
+
+def raise_and_capture():
+    """Raise an exception and capture it.
+
+    This is a utility function for test_set_tags.
+    """
+    try:
+        1 / 0
+    except ZeroDivisionError:
+        capture_exception()
+
+
+def test_set_tags(sentry_init, capture_events):
+    sentry_init()
+    events = capture_events()
+
+    set_tags({"tag1": "value1", "tag2": "value2"})
+    raise_and_capture()
+
+    (*_, event) = events
+    assert event["tags"] == {"tag1": "value1", "tag2": "value2"}, "Setting tags failed"
+
+    set_tags({"tag2": "updated", "tag3": "new"})
+    raise_and_capture()
+
+    (*_, event) = events
+    assert event["tags"] == {
+        "tag1": "value1",
+        "tag2": "updated",
+        "tag3": "new",
+    }, "Updating tags failed"
+
+    set_tags({})
+    raise_and_capture()
+
+    (*_, event) = events
+    assert event["tags"] == {
+        "tag1": "value1",
+        "tag2": "updated",
+        "tag3": "new",
+    }, "Updating tags with empty dict changed tags"
diff --git a/tests/test_scope.py b/tests/test_scope.py
index d5910a8c1d..6162a8da2f 100644
--- a/tests/test_scope.py
+++ b/tests/test_scope.py
@@ -796,3 +796,29 @@ def test_should_send_default_pii_false(sentry_init):
     sentry_init(send_default_pii=False)

     assert should_send_default_pii() is False
+
+
+def test_set_tags():
+    scope = Scope()
+    scope.set_tags({"tag1": "value1", "tag2": "value2"})
+    event = scope.apply_to_event({}, {})
+
+    assert event["tags"] == {"tag1": "value1", "tag2": "value2"}, "Setting tags failed"
+
+    scope.set_tags({"tag2": "updated", "tag3": "new"})
+    event = scope.apply_to_event({}, {})
+
+    assert event["tags"] == {
+        "tag1": "value1",
+        "tag2": "updated",
+        "tag3": "new",
+    }, "Updating tags failed"
+
+    scope.set_tags({})
+    event = scope.apply_to_event({}, {})
+
+    assert event["tags"] == {
+        "tag1": "value1",
+        "tag2": "updated",
+        "tag3": "new",
+    }, "Updating tags with empty dict changed tags"
diff --git a/tox.ini b/tox.ini
index e193de52b1..f1bc0e7a5e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -29,6 +29,10 @@ envlist =
     {py3.7,py3.9,py3.11}-aiohttp-v{3.8}
     {py3.8,py3.11}-aiohttp-latest

+    # Anthropic
+    {py3.7,py3.11,py3.12}-anthropic-v{0.16,0.25}
+    {py3.7,py3.11,py3.12}-anthropic-latest
+
     # Ariadne
     {py3.8,py3.11}-ariadne-v{0.20}
     {py3.8,py3.11,py3.12}-ariadne-latest
@@ -140,6 +144,14 @@ envlist =
     {py3.6,py3.11,py3.12}-huey-v{2.0}
     {py3.6,py3.11,py3.12}-huey-latest

+    # Huggingface Hub
+    {py3.9,py3.11,py3.12}-huggingface_hub-{v0.22,latest}
+
+    # Langchain
+    {py3.9,py3.11,py3.12}-langchain-0.1
+    {py3.9,py3.11,py3.12}-langchain-latest
+    {py3.9,py3.11,py3.12}-langchain-notiktoken
+
     # Loguru
     {py3.6,py3.11,py3.12}-loguru-v{0.5}
     {py3.6,py3.11,py3.12}-loguru-latest
@@ -149,11 +161,6 @@
     {py3.9,py3.11,py3.12}-openai-latest
     {py3.9,py3.11,py3.12}-openai-notiktoken

-    # OpenAI
-    {py3.9,py3.11,py3.12}-openai-v1
-    {py3.9,py3.11,py3.12}-openai-latest
-    {py3.9,py3.11,py3.12}-openai-notiktoken
-
     # OpenTelemetry (OTel)
     {py3.7,py3.9,py3.11,py3.12}-opentelemetry

@@ -248,7 +255,7 @@ deps =
     # === Common ===
     py3.8-common: hypothesis
-    {py3.6,py3.7,py3.8,py3.9,py3.10,py3.11,py3.12}-common: pytest-asyncio<=0.21.1
+    {py3.6,py3.7,py3.8,py3.9,py3.10,py3.11,py3.12}-common: pytest-asyncio
     # See https://github.com/pytest-dev/pytest/issues/9621
     # and https://github.com/pytest-dev/pytest-forked/issues/67
     # for justification of the upper bound on pytest
@@ -268,8 +275,13 @@ deps =
     aiohttp-v3.8: aiohttp~=3.8.0
     aiohttp-latest: aiohttp
     aiohttp: pytest-aiohttp
-    aiohttp-v3.8: pytest-asyncio<=0.21.1
-    aiohttp-latest: pytest-asyncio<=0.21.1
+    aiohttp-v3.8: pytest-asyncio
+    aiohttp-latest: pytest-asyncio
+
+    # Anthropic
+    anthropic-v0.25: anthropic~=0.25.0
+    anthropic-v0.16: anthropic~=0.16.0
+    anthropic-latest: anthropic

     # Ariadne
     ariadne-v0.20: ariadne~=0.20.0
@@ -283,17 +295,17 @@ deps =
     arq-v0.23: pydantic<2
     arq-latest: arq
     arq: fakeredis>=2.2.0,<2.8
-    arq: pytest-asyncio<=0.21.1
+    arq: pytest-asyncio
     arq: async-timeout

     # Asgi
-    asgi: pytest-asyncio<=0.21.1
+    asgi: pytest-asyncio
     asgi: async-asgi-testclient

     # Asyncpg
     asyncpg-v0.23: asyncpg~=0.23.0
     asyncpg-latest: asyncpg
-    asyncpg: pytest-asyncio<=0.21.1
+    asyncpg: pytest-asyncio

     # AWS Lambda
     aws_lambda: boto3
@@ -345,10 +357,10 @@ deps =
     django-v{1.11,2.0,2.2,3.0}: pytest-django<4.0
     django-v{3.2,4.0,4.1,4.2,5.0}: pytest-django
     django-v{4.0,4.1,4.2,5.0}: djangorestframework
-    django-v{4.0,4.1,4.2,5.0}: pytest-asyncio<=0.21.1
+    django-v{4.0,4.1,4.2,5.0}: pytest-asyncio
     django-v{4.0,4.1,4.2,5.0}: Werkzeug
     django-latest: djangorestframework
-    django-latest: pytest-asyncio<=0.21.1
+    django-latest: pytest-asyncio
     django-latest: pytest-django
     django-latest: Werkzeug
     django-latest: channels[daphne]
@@ -375,7 +387,7 @@ deps =
     fastapi: httpx
     # (this is a dependency of httpx)
     fastapi: anyio<4.0.0
-    fastapi: pytest-asyncio<=0.21.1
+    fastapi: pytest-asyncio
     fastapi: python-multipart
     fastapi: requests
     fastapi-v{0.79}: fastapi~=0.79.0
@@ -407,7 +419,7 @@ deps =
     grpc: protobuf
     grpc: mypy-protobuf
     grpc: types-protobuf
-    grpc: pytest-asyncio<=0.21.1
+    grpc: pytest-asyncio
     grpc-v1.39: grpcio~=1.39.0
     grpc-v1.49: grpcio~=1.49.1
     grpc-v1.59: grpcio~=1.59.0
@@ -437,6 +449,18 @@ deps =
     huey-v2.0: huey~=2.0.0
     huey-latest: huey

+    # Huggingface Hub
+    huggingface_hub-v0.22: huggingface_hub~=0.22.2
+    huggingface_hub-latest: huggingface_hub
+
+    # Langchain
+    langchain: openai~=1.0.0
+    langchain-0.1: langchain~=0.1.11
+    langchain-0.1: tiktoken~=0.6.0
+    langchain-latest: langchain
+    langchain-latest: tiktoken~=0.6.0
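+    # "notiktoken" installs langchain without tiktoken, covering the code
+    # path where token usage cannot be computed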
+    langchain-notiktoken: langchain
+
     # Loguru
     loguru-v0.5: loguru~=0.5.0
     loguru-latest: loguru
@@ -472,7 +496,7 @@ deps =

     # Quart
     quart: quart-auth
-    quart: pytest-asyncio<=0.21.1
+    quart: pytest-asyncio
     quart-v0.16: blinker<1.6
     quart-v0.16: jinja2<3.1.0
     quart-v0.16: Werkzeug<2.1.0
@@ -485,7 +509,7 @@ deps =
     # Redis
     redis: fakeredis!=1.7.4
     redis: pytest<8.0.0
-    {py3.7,py3.8,py3.9,py3.10,py3.11}-redis: pytest-asyncio<=0.21.1
+    {py3.7,py3.8,py3.9,py3.10,py3.11}-redis: pytest-asyncio
     redis-v3: redis~=3.0
     redis-v4: redis~=4.0
     redis-v5: redis~=5.0
@@ -526,7 +550,7 @@ deps =
     sanic-latest: sanic

     # Starlette
-    starlette: pytest-asyncio<=0.21.1
+    starlette: pytest-asyncio
     starlette: python-multipart
     starlette: requests
     starlette: httpx
@@ -541,7 +565,7 @@ deps =
     starlette-latest: starlette

     # Starlite
-    starlite: pytest-asyncio<=0.21.1
+    starlite: pytest-asyncio
     starlite: python-multipart
     starlite: requests
     starlite: cryptography
@@ -563,27 +587,27 @@ deps =
     strawberry-latest: strawberry-graphql[fastapi,flask]

     # Tornado
+    tornado: pytest<8.2
     tornado-v6.0: tornado~=6.0.0
     tornado-v6: tornado~=6.0
     tornado-latest: tornado

     # Trytond
+    trytond: werkzeug
+    trytond-v4: werkzeug<1.0
     trytond-v4: trytond~=4.0
     trytond-v5: trytond~=5.0
     trytond-v6: trytond~=6.0
     trytond-v7: trytond~=7.0
     trytond-latest: trytond
-    trytond-v{4}: werkzeug<1.0
-    trytond-v{5,6,7}: werkzeug<2.0
-    trytond-latest: werkzeug<2.0
-
 setenv =
     PYTHONDONTWRITEBYTECODE=1
     OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
     common: TESTPATH=tests
     gevent: TESTPATH=tests
     aiohttp: TESTPATH=tests/integrations/aiohttp
+    anthropic: TESTPATH=tests/integrations/anthropic
     ariadne: TESTPATH=tests/integrations/ariadne
     arq: TESTPATH=tests/integrations/arq
     asgi: TESTPATH=tests/integrations/asgi
@@ -605,6 +629,8 @@ setenv =
     graphene: TESTPATH=tests/integrations/graphene
     httpx: TESTPATH=tests/integrations/httpx
    huey: TESTPATH=tests/integrations/huey
+    huggingface_hub: TESTPATH=tests/integrations/huggingface_hub
+    langchain: TESTPATH=tests/integrations/langchain
     loguru: TESTPATH=tests/integrations/loguru
     openai: TESTPATH=tests/integrations/openai
     opentelemetry: TESTPATH=tests/integrations/opentelemetry