diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 34815da549..6197f9023d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -20,7 +20,7 @@ jobs: steps: - name: Get auth token id: token - uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6 + uses: actions/create-github-app-token@0f859bf9e69e887678d5bbfbee594437cb440ffe # v2.1.0 with: app-id: ${{ vars.SENTRY_RELEASE_BOT_CLIENT_ID }} private-key: ${{ secrets.SENTRY_RELEASE_BOT_PRIVATE_KEY }} diff --git a/.github/workflows/test-integrations-ai.yml b/.github/workflows/test-integrations-ai.yml index 2777810a8f..dd57f5909b 100644 --- a/.github/workflows/test-integrations-ai.yml +++ b/.github/workflows/test-integrations-ai.yml @@ -92,7 +92,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} @@ -171,7 +171,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-cloud.yml b/.github/workflows/test-integrations-cloud.yml index 6a9b9df0de..e79c9513ef 100644 --- a/.github/workflows/test-integrations-cloud.yml +++ b/.github/workflows/test-integrations-cloud.yml @@ -92,7 +92,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} @@ -171,7 +171,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-common.yml b/.github/workflows/test-integrations-common.yml index 2ceb23b79c..c7e356420c 100644 --- a/.github/workflows/test-integrations-common.yml +++ b/.github/workflows/test-integrations-common.yml @@ -72,7 +72,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-dbs.yml b/.github/workflows/test-integrations-dbs.yml index 1ad39421d6..6c203379fe 100644 --- a/.github/workflows/test-integrations-dbs.yml +++ b/.github/workflows/test-integrations-dbs.yml @@ -112,7 +112,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} @@ -211,7 +211,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-flags.yml b/.github/workflows/test-integrations-flags.yml index d6da6c8acd..926465990d 100644 --- a/.github/workflows/test-integrations-flags.yml +++ b/.github/workflows/test-integrations-flags.yml @@ -84,7 +84,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our 
coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-gevent.yml b/.github/workflows/test-integrations-gevent.yml index c0bd099e45..a08e91c909 100644 --- a/.github/workflows/test-integrations-gevent.yml +++ b/.github/workflows/test-integrations-gevent.yml @@ -72,7 +72,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-graphql.yml b/.github/workflows/test-integrations-graphql.yml index e851dfc9bb..9bbeee6c6a 100644 --- a/.github/workflows/test-integrations-graphql.yml +++ b/.github/workflows/test-integrations-graphql.yml @@ -84,7 +84,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-misc.yml b/.github/workflows/test-integrations-misc.yml index 8a2e87c9ca..3595640ce1 100644 --- a/.github/workflows/test-integrations-misc.yml +++ b/.github/workflows/test-integrations-misc.yml @@ -92,7 +92,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-network.yml b/.github/workflows/test-integrations-network.yml index 47ae674934..3ac5508dab 100644 --- a/.github/workflows/test-integrations-network.yml +++ b/.github/workflows/test-integrations-network.yml @@ -80,7 +80,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} @@ -147,7 +147,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-tasks.yml b/.github/workflows/test-integrations-tasks.yml index 6b3fcab41f..13c34224be 100644 --- a/.github/workflows/test-integrations-tasks.yml +++ b/.github/workflows/test-integrations-tasks.yml @@ -107,7 +107,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} @@ -201,7 +201,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/.github/workflows/test-integrations-web-1.yml b/.github/workflows/test-integrations-web-1.yml index 3b48472d5e..e52a903208 100644 --- a/.github/workflows/test-integrations-web-1.yml +++ b/.github/workflows/test-integrations-web-1.yml @@ -102,7 +102,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git 
a/.github/workflows/test-integrations-web-2.yml b/.github/workflows/test-integrations-web-2.yml index b98e5f02fc..c703cfafce 100644 --- a/.github/workflows/test-integrations-web-2.yml +++ b/.github/workflows/test-integrations-web-2.yml @@ -108,7 +108,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} @@ -203,7 +203,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov if: ${{ !cancelled() }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 21b1d5fec9..6e06e61e32 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 2.35.0 + +### Various fixes & improvements + +- [Langchain Integration](https://docs.sentry.io/platforms/python/integrations/langchain/) now supports the Sentry [AI dashboard](https://docs.sentry.io/product/insights/ai/agents/dashboard/). (#4678) by @shellmayr +- [Anthropic Integration](https://docs.sentry.io/platforms/python/integrations/anthropic/) now supports the Sentry [AI dashboard](https://docs.sentry.io/product/insights/ai/agents/dashboard/). (#4674) by @constantinius +- AI Agents templates for `@trace` decorator (#4676) by @antonpirker +- Sentry Logs: Add `enable_logs`, `before_send_log` as top-level `sentry_sdk.init()` options (#4644) by @sentrivana +- Tracing: Improve `@trace` decorator. Allows to set `span.op`, `span.name`, and `span.attributes` (#4648) by @antonpirker +- Tracing: Add convenience function `sentry_sdk.update_current_span`. (#4673) by @antonpirker +- Tracing: Add `Span.update_data()` to update multiple `span.data` items at once. (#4666) by @antonpirker +- GNU-integration: make path optional (#4688) by @MeredithAnya +- Clickhouse: Don't eat the generator data (#4669) by @szokeasaurusrex +- Clickhouse: List `send_data` parameters (#4667) by @szokeasaurusrex +- Update `gen_ai.*` and `ai.*` attributes (#4665) by @antonpirker +- Better checking for empty tools list (#4647) by @antonpirker +- Remove performance paper cuts (#4675) by @sentrivana +- Help for debugging Cron problems (#4686) by @antonpirker +- Fix Redis CI (#4691) by @sentrivana +- Fix plugins key codecov (#4655) by @sl0thentr0py +- Fix Mypy (#4649) by @sentrivana +- Update tox.ini (#4689) by @sentrivana +- build(deps): bump actions/create-github-app-token from 2.0.6 to 2.1.0 (#4684) by @dependabot + ## 2.34.1 ### Various fixes & improvements diff --git a/docs/api.rst b/docs/api.rst index a6fb49346d..802abee75d 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -37,10 +37,12 @@ Enriching Events Performance Monitoring ====================== +.. autofunction:: sentry_sdk.api.trace .. autofunction:: sentry_sdk.api.continue_trace .. autofunction:: sentry_sdk.api.get_current_span .. autofunction:: sentry_sdk.api.start_span .. autofunction:: sentry_sdk.api.start_transaction +.. autofunction:: sentry_sdk.api.update_current_span Distributed Tracing diff --git a/docs/conf.py b/docs/conf.py index f5d0b9e121..465e29a4e8 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -31,7 +31,7 @@ copyright = "2019-{}, Sentry Team and Contributors".format(datetime.now().year) author = "Sentry Team and Contributors" -release = "2.34.1" +release = "2.35.0" version = ".".join(release.split(".")[:2]) # The short X.Y version. 
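A minimal usage sketch of the 2.35.0 additions listed above, assuming a placeholder DSN and transaction; only `enable_logs`, `before_send_log`, and `update_current_span` are taken from this release:

import sentry_sdk
from sentry_sdk.consts import OP

def drop_debug_logs(log, hint):
    # Return the (possibly modified) log to keep it, or None to drop it.
    if log.get("severity_text") == "debug":
        return None
    return log

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    traces_sample_rate=1.0,
    enable_logs=True,                 # new top-level option (was _experiments["enable_logs"])
    before_send_log=drop_debug_logs,  # new top-level option (was _experiments["before_send_log"])
)

with sentry_sdk.start_transaction(op="task", name="nightly-import"):
    with sentry_sdk.start_span(op=OP.FUNCTION, name="process_user_data"):
        # Enrich whichever span is currently active; a no-op if none is.
        sentry_sdk.update_current_span(attributes={"user_id": 123, "batch_size": 50})
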
diff --git a/pyproject.toml b/pyproject.toml index e5eae2c21f..deba247e39 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -126,6 +126,10 @@ ignore_missing_imports = true module = "langchain_core.*" ignore_missing_imports = true +[[tool.mypy.overrides]] +module = "langchain.*" +ignore_missing_imports = true + [[tool.mypy.overrides]] module = "executing.*" ignore_missing_imports = true diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja index c67f4127d5..4c3b86af81 100644 --- a/scripts/populate_tox/tox.jinja +++ b/scripts/populate_tox/tox.jinja @@ -300,6 +300,7 @@ deps = {py3.7,py3.8,py3.9,py3.10,py3.11,py3.12,py3.13}-redis: pytest-asyncio redis-v3: redis~=3.0 redis-v4: redis~=4.0 + redis-v4: fakeredis<2.31.0 redis-v5: redis~=5.0 redis-latest: redis diff --git a/scripts/split_tox_gh_actions/templates/test_group.jinja b/scripts/split_tox_gh_actions/templates/test_group.jinja index 44f51f473e..96faefc54e 100644 --- a/scripts/split_tox_gh_actions/templates/test_group.jinja +++ b/scripts/split_tox_gh_actions/templates/test_group.jinja @@ -105,7 +105,7 @@ token: {% raw %}${{ secrets.CODECOV_TOKEN }}{% endraw %} files: coverage.xml # make sure no plugins alter our coverage reports - plugin: noop + plugins: noop verbose: true - name: Upload test results to Codecov diff --git a/sentry_sdk/__init__.py b/sentry_sdk/__init__.py index 7b1eda172a..a37b52ff4e 100644 --- a/sentry_sdk/__init__.py +++ b/sentry_sdk/__init__.py @@ -50,6 +50,7 @@ "start_session", "end_session", "set_transaction_name", + "update_current_span", ] # Initialize the debug support after everything is loaded diff --git a/sentry_sdk/ai/utils.py b/sentry_sdk/ai/utils.py index a3c62600c0..cf52cba6e8 100644 --- a/sentry_sdk/ai/utils.py +++ b/sentry_sdk/ai/utils.py @@ -7,8 +7,8 @@ from sentry_sdk.utils import logger -def _normalize_data(data): - # type: (Any) -> Any +def _normalize_data(data, unpack=True): + # type: (Any, bool) -> Any # convert pydantic data (e.g. 
OpenAI v1+) to json compatible format if hasattr(data, "model_dump"): @@ -18,18 +18,18 @@ def _normalize_data(data): logger.warning("Could not convert pydantic data to JSON: %s", e) return data if isinstance(data, list): - if len(data) == 1: - return _normalize_data(data[0]) # remove empty dimensions - return list(_normalize_data(x) for x in data) + if unpack and len(data) == 1: + return _normalize_data(data[0], unpack=unpack) # remove empty dimensions + return list(_normalize_data(x, unpack=unpack) for x in data) if isinstance(data, dict): - return {k: _normalize_data(v) for (k, v) in data.items()} + return {k: _normalize_data(v, unpack=unpack) for (k, v) in data.items()} return data -def set_data_normalized(span, key, value): - # type: (Span, str, Any) -> None - normalized = _normalize_data(value) +def set_data_normalized(span, key, value, unpack=True): + # type: (Span, str, Any, bool) -> None + normalized = _normalize_data(value, unpack=unpack) if isinstance(normalized, (int, float, bool, str)): span.set_data(key, normalized) else: diff --git a/sentry_sdk/api.py b/sentry_sdk/api.py index a4fb95e9a1..43758b4d78 100644 --- a/sentry_sdk/api.py +++ b/sentry_sdk/api.py @@ -85,6 +85,7 @@ def overload(x): "start_session", "end_session", "set_transaction_name", + "update_current_span", ] @@ -473,3 +474,82 @@ def end_session(): def set_transaction_name(name, source=None): # type: (str, Optional[str]) -> None return get_current_scope().set_transaction_name(name, source) + + +def update_current_span(op=None, name=None, attributes=None, data=None): + # type: (Optional[str], Optional[str], Optional[dict[str, Union[str, int, float, bool]]], Optional[dict[str, Any]]) -> None + """ + Update the current active span with the provided parameters. + + This function allows you to modify properties of the currently active span. + If no span is currently active, this function will do nothing. + + :param op: The operation name for the span. This is a high-level description + of what the span represents (e.g., "http.client", "db.query"). + You can use predefined constants from :py:class:`sentry_sdk.consts.OP` + or provide your own string. If not provided, the span's operation will + remain unchanged. + :type op: str or None + + :param name: The human-readable name/description for the span. This provides + more specific details about what the span represents (e.g., "GET /api/users", + "SELECT * FROM users"). If not provided, the span's name will remain unchanged. + :type name: str or None + + :param data: A dictionary of key-value pairs to add as data to the span. This + data will be merged with any existing span data. If not provided, + no data will be added. + + .. deprecated:: 2.35.0 + Use ``attributes`` instead. The ``data`` parameter will be removed + in a future version. + :type data: dict[str, Union[str, int, float, bool]] or None + + :param attributes: A dictionary of key-value pairs to add as attributes to the span. + Attribute values must be strings, integers, floats, or booleans. These + attributes will be merged with any existing span data. If not provided, + no attributes will be added. + :type attributes: dict[str, Union[str, int, float, bool]] or None + + :returns: None + + .. 
versionadded:: 2.35.0 + + Example:: + + import sentry_sdk + from sentry_sdk.consts import OP + + sentry_sdk.update_current_span( + op=OP.FUNCTION, + name="process_user_data", + attributes={"user_id": 123, "batch_size": 50} + ) + """ + current_span = get_current_span() + + if current_span is None: + return + + if op is not None: + current_span.op = op + + if name is not None: + # internally it is still description + current_span.description = name + + if data is not None and attributes is not None: + raise ValueError( + "Cannot provide both `data` and `attributes`. Please use only `attributes`." + ) + + if data is not None: + warnings.warn( + "The `data` parameter is deprecated. Please use `attributes` instead.", + DeprecationWarning, + stacklevel=2, + ) + attributes = data + + if attributes is not None: + current_span.update_data(attributes) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index dca39beab8..5d584a5537 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -23,6 +23,8 @@ handle_in_app, is_gevent, logger, + get_before_send_log, + has_logs_enabled, ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace @@ -382,7 +384,8 @@ def _capture_envelope(envelope): ) self.log_batcher = None - if experiments.get("enable_logs", False): + + if has_logs_enabled(self.options): from sentry_sdk._log_batcher import LogBatcher self.log_batcher = LogBatcher(capture_func=_capture_envelope) @@ -898,9 +901,8 @@ def capture_event( return return_value def _capture_experimental_log(self, log): - # type: (Log) -> None - logs_enabled = self.options["_experiments"].get("enable_logs", False) - if not logs_enabled: + # type: (Optional[Log]) -> None + if not has_logs_enabled(self.options) or log is None: return current_scope = sentry_sdk.get_current_scope() @@ -955,9 +957,10 @@ def _capture_experimental_log(self, log): f'[Sentry Logs] [{log.get("severity_text")}] {log.get("body")}' ) - before_send_log = self.options["_experiments"].get("before_send_log") + before_send_log = get_before_send_log(self.options) if before_send_log is not None: log = before_send_log(log, {}) + if log is None: return diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 3ae33b6a94..f307e526af 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -100,6 +100,17 @@ class CompressionAlgo(Enum): ] +class SPANTEMPLATE(str, Enum): + DEFAULT = "default" + AI_AGENT = "ai_agent" + AI_TOOL = "ai_tool" + AI_CHAT = "ai_chat" + + def __str__(self): + # type: () -> str + return self.value + + class INSTRUMENTER: SENTRY = "sentry" OTEL = "otel" @@ -113,71 +124,106 @@ class SPANDATA: AI_CITATIONS = "ai.citations" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + References or sources cited by the AI model in its response. Example: ["Smith et al. 2020", "Jones 2019"] """ AI_DOCUMENTS = "ai.documents" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Documents or content chunks used as context for the AI model. Example: ["doc1.txt", "doc2.pdf"] """ AI_FINISH_REASON = "ai.finish_reason" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_RESPONSE_FINISH_REASONS instead. + The reason why the model stopped generating. Example: "length" """ AI_FREQUENCY_PENALTY = "ai.frequency_penalty" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_FREQUENCY_PENALTY instead. + Used to reduce repetitiveness of generated tokens. Example: 0.5 """ AI_FUNCTION_CALL = "ai.function_call" """ + .. 
deprecated:: + This attribute is deprecated. Use GEN_AI_RESPONSE_TOOL_CALLS instead. + For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls """ AI_GENERATION_ID = "ai.generation_id" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_RESPONSE_ID instead. + Unique identifier for the completion. Example: "gen_123abc" """ AI_INPUT_MESSAGES = "ai.input_messages" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_MESSAGES instead. + The input messages to an LLM call. Example: [{"role": "user", "message": "hello"}] """ AI_LOGIT_BIAS = "ai.logit_bias" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + For an AI model call, the logit bias """ AI_METADATA = "ai.metadata" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Extra metadata passed to an AI pipeline step. Example: {"executed_function": "add_integers"} """ AI_MODEL_ID = "ai.model_id" """ - The unique descriptor of the model being execugted + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_MODEL or GEN_AI_RESPONSE_MODEL instead. + + The unique descriptor of the model being executed. Example: gpt-4 """ AI_PIPELINE_NAME = "ai.pipeline.name" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_PIPELINE_NAME instead. + Name of the AI pipeline or chain being executed. - DEPRECATED: Use GEN_AI_PIPELINE_NAME instead. Example: "qa-pipeline" """ AI_PREAMBLE = "ai.preamble" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + For an AI model call, the preamble parameter. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style. Example: "You are now a clown." @@ -185,100 +231,150 @@ class SPANDATA: AI_PRESENCE_PENALTY = "ai.presence_penalty" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_PRESENCE_PENALTY instead. + Used to reduce repetitiveness of generated tokens. Example: 0.5 """ AI_RAW_PROMPTING = "ai.raw_prompting" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Minimize pre-processing done to the prompt sent to the LLM. Example: true """ AI_RESPONSE_FORMAT = "ai.response_format" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + For an AI model call, the format of the response """ AI_RESPONSES = "ai.responses" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_RESPONSE_TEXT instead. + The responses to an AI model call. Always as a list. Example: ["hello", "world"] """ AI_SEARCH_QUERIES = "ai.search_queries" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Queries used to search for relevant context or documents. Example: ["climate change effects", "renewable energy"] """ AI_SEARCH_REQUIRED = "ai.is_search_required" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Boolean indicating if the model needs to perform a search. Example: true """ AI_SEARCH_RESULTS = "ai.search_results" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Results returned from search queries for context. Example: ["Result 1", "Result 2"] """ AI_SEED = "ai.seed" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_SEED instead. + The seed, ideally models given the same seed and same other parameters will produce the exact same output. 
Example: 123.45 """ AI_STREAMING = "ai.streaming" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_RESPONSE_STREAMING instead. + Whether or not the AI model call's response was streamed back asynchronously - DEPRECATED: Use GEN_AI_RESPONSE_STREAMING instead. Example: true """ AI_TAGS = "ai.tags" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Tags that describe an AI pipeline step. Example: {"executed_function": "add_integers"} """ AI_TEMPERATURE = "ai.temperature" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_TEMPERATURE instead. + For an AI model call, the temperature parameter. Temperature essentially means how random the output will be. Example: 0.5 """ AI_TEXTS = "ai.texts" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Raw text inputs provided to the model. Example: ["What is machine learning?"] """ AI_TOP_K = "ai.top_k" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_TOP_K instead. + For an AI model call, the top_k parameter. Top_k essentially controls how random the output will be. Example: 35 """ AI_TOP_P = "ai.top_p" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_TOP_P instead. + For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be. Example: 0.5 """ AI_TOOL_CALLS = "ai.tool_calls" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_RESPONSE_TOOL_CALLS instead. + For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls """ AI_TOOLS = "ai.tools" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_REQUEST_AVAILABLE_TOOLS instead. + For an AI model call, the functions that are available """ AI_WARNINGS = "ai.warnings" """ + .. deprecated:: + This attribute is deprecated. Use GEN_AI_* attributes instead. + Warning messages generated during model execution. Example: ["Token limit exceeded"] """ @@ -383,6 +479,18 @@ class SPANDATA: Example: "qa-pipeline" """ + GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons" + """ + The reason why the model stopped generating. + Example: "COMPLETE" + """ + + GEN_AI_RESPONSE_ID = "gen_ai.response.id" + """ + Unique identifier for the completion. + Example: "gen_123abc" + """ + GEN_AI_RESPONSE_MODEL = "gen_ai.response.model" """ Exact model identifier used to generate the response @@ -443,12 +551,24 @@ class SPANDATA: Example: 0.1 """ + GEN_AI_REQUEST_SEED = "gen_ai.request.seed" + """ + The seed, ideally models given the same seed and same other parameters will produce the exact same output. + Example: "1234567890" + """ + GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature" """ The temperature parameter used to control randomness in the output. Example: 0.7 """ + GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k" + """ + Limits the model to only consider the K most likely next tokens, where K is an integer (e.g., top_k=20 means only the 20 highest probability tokens are considered). + Example: 35 + """ + GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p" """ The top_p parameter used to control diversity via nucleus sampling. 
@@ -675,6 +795,7 @@ class OP: GEN_AI_EMBEDDINGS = "gen_ai.embeddings" GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool" GEN_AI_HANDOFF = "gen_ai.handoff" + GEN_AI_PIPELINE = "gen_ai.pipeline" GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent" GEN_AI_RESPONSES = "gen_ai.responses" GRAPHQL_EXECUTE = "graphql.execute" @@ -702,11 +823,6 @@ class OP: HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = ( "ai.chat_completions.create.huggingface_hub" ) - LANGCHAIN_PIPELINE = "ai.pipeline.langchain" - LANGCHAIN_RUN = "ai.run.langchain" - LANGCHAIN_TOOL = "ai.tool.langchain" - LANGCHAIN_AGENT = "ai.agent.langchain" - LANGCHAIN_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.langchain" QUEUE_PROCESS = "queue.process" QUEUE_PUBLISH = "queue.publish" QUEUE_SUBMIT_ARQ = "queue.submit.arq" @@ -798,6 +914,8 @@ def __init__( custom_repr=None, # type: Optional[Callable[..., Optional[str]]] add_full_stack=DEFAULT_ADD_FULL_STACK, # type: bool max_stack_frames=DEFAULT_MAX_STACK_FRAMES, # type: Optional[int] + enable_logs=False, # type: bool + before_send_log=None, # type: Optional[Callable[[Log, Hint], Optional[Log]]] ): # type: (...) -> None """Initialize the Sentry SDK with the given parameters. All parameters described here can be used in a call to `sentry_sdk.init()`. @@ -1168,7 +1286,6 @@ def __init__( :param profile_session_sample_rate: - :param enable_tracing: :param propagate_traces: @@ -1179,6 +1296,14 @@ def __init__( :param instrumenter: + :param enable_logs: Set `enable_logs` to True to enable the SDK to emit + Sentry logs. Defaults to False. + + :param before_send_log: An optional function to modify or filter out logs + before they're sent to Sentry. Any modifications to the log in this + function will be retained. If the function returns None, the log will + not be sent to Sentry. 
+ :param _experiments: """ pass @@ -1204,4 +1329,4 @@ def _get_default_options(): del _get_default_options -VERSION = "2.34.1" +VERSION = "2.35.0" diff --git a/sentry_sdk/crons/api.py b/sentry_sdk/crons/api.py index 20e95685a7..b67e5961c8 100644 --- a/sentry_sdk/crons/api.py +++ b/sentry_sdk/crons/api.py @@ -1,6 +1,7 @@ import uuid import sentry_sdk +from sentry_sdk.utils import logger from typing import TYPE_CHECKING @@ -54,4 +55,8 @@ def capture_checkin( sentry_sdk.capture_event(check_in_event) + logger.debug( + f"[Crons] Captured check-in ({check_in_event.get('check_in_id')}): {check_in_event.get('monitor_slug')} -> {check_in_event.get('status')}" + ) + return check_in_event["check_in_id"] diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py index 1e1f9112a1..05d45ef62f 100644 --- a/sentry_sdk/integrations/anthropic.py +++ b/sentry_sdk/integrations/anthropic.py @@ -1,8 +1,10 @@ from functools import wraps +import json from typing import TYPE_CHECKING import sentry_sdk from sentry_sdk.ai.monitoring import record_token_usage +from sentry_sdk.ai.utils import set_data_normalized from sentry_sdk.consts import OP, SPANDATA from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii @@ -10,9 +12,15 @@ capture_internal_exceptions, event_from_exception, package_version, + safe_serialize, ) try: + try: + from anthropic import NOT_GIVEN + except ImportError: + NOT_GIVEN = None + from anthropic.resources import AsyncMessages, Messages if TYPE_CHECKING: @@ -53,8 +61,11 @@ def _capture_exception(exc): sentry_sdk.capture_event(event, hint=hint) -def _calculate_token_usage(result, span): - # type: (Messages, Span) -> None +def _get_token_usage(result): + # type: (Messages) -> tuple[int, int] + """ + Get token usage from the Anthropic response. + """ input_tokens = 0 output_tokens = 0 if hasattr(result, "usage"): @@ -64,37 +75,13 @@ def _calculate_token_usage(result, span): if hasattr(usage, "output_tokens") and isinstance(usage.output_tokens, int): output_tokens = usage.output_tokens - total_tokens = input_tokens + output_tokens + return input_tokens, output_tokens - record_token_usage( - span, - input_tokens=input_tokens, - output_tokens=output_tokens, - total_tokens=total_tokens, - ) - -def _get_responses(content): - # type: (list[Any]) -> list[dict[str, Any]] +def _collect_ai_data(event, model, input_tokens, output_tokens, content_blocks): + # type: (MessageStreamEvent, str | None, int, int, list[str]) -> tuple[str | None, int, int, list[str]] """ - Get JSON of a Anthropic responses. - """ - responses = [] - for item in content: - if hasattr(item, "text"): - responses.append( - { - "type": item.type, - "text": item.text, - } - ) - return responses - - -def _collect_ai_data(event, input_tokens, output_tokens, content_blocks): - # type: (MessageStreamEvent, int, int, list[str]) -> tuple[int, int, list[str]] - """ - Count token usage and collect content blocks from the AI streaming response. + Collect model information, token usage, and collect content blocks from the AI streaming response. 
""" with capture_internal_exceptions(): if hasattr(event, "type"): @@ -102,6 +89,7 @@ def _collect_ai_data(event, input_tokens, output_tokens, content_blocks): usage = event.message.usage input_tokens += usage.input_tokens output_tokens += usage.output_tokens + model = event.message.model or model elif event.type == "content_block_start": pass elif event.type == "content_block_delta": @@ -114,31 +102,80 @@ def _collect_ai_data(event, input_tokens, output_tokens, content_blocks): elif event.type == "message_delta": output_tokens += event.usage.output_tokens - return input_tokens, output_tokens, content_blocks + return model, input_tokens, output_tokens, content_blocks -def _add_ai_data_to_span( - span, integration, input_tokens, output_tokens, content_blocks -): - # type: (Span, AnthropicIntegration, int, int, list[str]) -> None +def _set_input_data(span, kwargs, integration): + # type: (Span, dict[str, Any], AnthropicIntegration) -> None """ - Add token usage and content blocks from the AI streaming response to the span. + Set input data for the span based on the provided keyword arguments for the anthropic message creation. """ - with capture_internal_exceptions(): - if should_send_default_pii() and integration.include_prompts: - complete_message = "".join(content_blocks) - span.set_data( - SPANDATA.AI_RESPONSES, - [{"type": "text", "text": complete_message}], - ) - total_tokens = input_tokens + output_tokens - record_token_usage( + messages = kwargs.get("messages") + if ( + messages is not None + and len(messages) > 0 + and should_send_default_pii() + and integration.include_prompts + ): + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_MESSAGES, safe_serialize(messages) + ) + + set_data_normalized( + span, SPANDATA.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False) + ) + + kwargs_keys_to_attributes = { + "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, + "model": SPANDATA.GEN_AI_REQUEST_MODEL, + "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, + "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K, + "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, + } + for key, attribute in kwargs_keys_to_attributes.items(): + value = kwargs.get(key) + if value is not NOT_GIVEN and value is not None: + set_data_normalized(span, attribute, value) + + # Input attributes: Tools + tools = kwargs.get("tools") + if tools is not NOT_GIVEN and tools is not None and len(tools) > 0: + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) + ) + + +def _set_output_data( + span, + integration, + model, + input_tokens, + output_tokens, + content_blocks, + finish_span=False, +): + # type: (Span, AnthropicIntegration, str | None, int | None, int | None, list[Any], bool) -> None + """ + Set output data for the span based on the AI response.""" + span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model) + if should_send_default_pii() and integration.include_prompts: + set_data_normalized( span, - input_tokens=input_tokens, - output_tokens=output_tokens, - total_tokens=total_tokens, + SPANDATA.GEN_AI_RESPONSE_TEXT, + json.dumps(content_blocks), + unpack=False, ) - span.set_data(SPANDATA.AI_STREAMING, True) + + record_token_usage( + span, + input_tokens=input_tokens, + output_tokens=output_tokens, + ) + + # TODO: GEN_AI_RESPONSE_TOOL_CALLS ? 
+ + if finish_span: + span.__exit__(None, None, None) def _sentry_patched_create_common(f, *args, **kwargs): @@ -155,31 +192,41 @@ def _sentry_patched_create_common(f, *args, **kwargs): except TypeError: return f(*args, **kwargs) + model = kwargs.get("model", "") + span = sentry_sdk.start_span( - op=OP.ANTHROPIC_MESSAGES_CREATE, - description="Anthropic messages create", + op=OP.GEN_AI_CHAT, + name=f"chat {model}".strip(), origin=AnthropicIntegration.origin, ) span.__enter__() - result = yield f, args, kwargs + _set_input_data(span, kwargs, integration) - # add data to span and finish it - messages = list(kwargs["messages"]) - model = kwargs.get("model") + result = yield f, args, kwargs with capture_internal_exceptions(): - span.set_data(SPANDATA.AI_MODEL_ID, model) - span.set_data(SPANDATA.AI_STREAMING, False) - - if should_send_default_pii() and integration.include_prompts: - span.set_data(SPANDATA.AI_INPUT_MESSAGES, messages) - if hasattr(result, "content"): - if should_send_default_pii() and integration.include_prompts: - span.set_data(SPANDATA.AI_RESPONSES, _get_responses(result.content)) - _calculate_token_usage(result, span) - span.__exit__(None, None, None) + input_tokens, output_tokens = _get_token_usage(result) + + content_blocks = [] + for content_block in result.content: + if hasattr(content_block, "to_dict"): + content_blocks.append(content_block.to_dict()) + elif hasattr(content_block, "model_dump"): + content_blocks.append(content_block.model_dump()) + elif hasattr(content_block, "text"): + content_blocks.append({"type": "text", "text": content_block.text}) + + _set_output_data( + span=span, + integration=integration, + model=getattr(result, "model", None), + input_tokens=input_tokens, + output_tokens=output_tokens, + content_blocks=content_blocks, + finish_span=True, + ) # Streaming response elif hasattr(result, "_iterator"): @@ -187,37 +234,53 @@ def _sentry_patched_create_common(f, *args, **kwargs): def new_iterator(): # type: () -> Iterator[MessageStreamEvent] + model = None input_tokens = 0 output_tokens = 0 content_blocks = [] # type: list[str] for event in old_iterator: - input_tokens, output_tokens, content_blocks = _collect_ai_data( - event, input_tokens, output_tokens, content_blocks + model, input_tokens, output_tokens, content_blocks = ( + _collect_ai_data( + event, model, input_tokens, output_tokens, content_blocks + ) ) yield event - _add_ai_data_to_span( - span, integration, input_tokens, output_tokens, content_blocks + _set_output_data( + span=span, + integration=integration, + model=model, + input_tokens=input_tokens, + output_tokens=output_tokens, + content_blocks=[{"text": "".join(content_blocks), "type": "text"}], + finish_span=True, ) - span.__exit__(None, None, None) async def new_iterator_async(): # type: () -> AsyncIterator[MessageStreamEvent] + model = None input_tokens = 0 output_tokens = 0 content_blocks = [] # type: list[str] async for event in old_iterator: - input_tokens, output_tokens, content_blocks = _collect_ai_data( - event, input_tokens, output_tokens, content_blocks + model, input_tokens, output_tokens, content_blocks = ( + _collect_ai_data( + event, model, input_tokens, output_tokens, content_blocks + ) ) yield event - _add_ai_data_to_span( - span, integration, input_tokens, output_tokens, content_blocks + _set_output_data( + span=span, + integration=integration, + model=model, + input_tokens=input_tokens, + output_tokens=output_tokens, + content_blocks=[{"text": "".join(content_blocks), "type": "text"}], + finish_span=True, ) - 
span.__exit__(None, None, None) if str(type(result._iterator)) == "<class 'async_generator'>": result._iterator = new_iterator_async() diff --git a/sentry_sdk/integrations/asgi.py b/sentry_sdk/integrations/asgi.py index 1b020ebbc0..dde8128a33 100644 --- a/sentry_sdk/integrations/asgi.py +++ b/sentry_sdk/integrations/asgi.py @@ -12,7 +12,6 @@ import sentry_sdk from sentry_sdk.api import continue_trace from sentry_sdk.consts import OP - from sentry_sdk.integrations._asgi_common import ( _get_headers, _get_request_data, @@ -42,7 +41,6 @@ if TYPE_CHECKING: from typing import Any - from typing import Callable from typing import Dict from typing import Optional from typing import Tuple @@ -102,6 +100,7 @@ def __init__( mechanism_type="asgi", # type: str span_origin="manual", # type: str http_methods_to_capture=DEFAULT_HTTP_METHODS_TO_CAPTURE, # type: Tuple[str, ...] + asgi_version=None, # type: Optional[int] ): # type: (...) -> None """ @@ -140,10 +139,16 @@ def __init__( self.app = app self.http_methods_to_capture = http_methods_to_capture - if _looks_like_asgi3(app): - self.__call__ = self._run_asgi3 # type: Callable[..., Any] - else: - self.__call__ = self._run_asgi2 + if asgi_version is None: + if _looks_like_asgi3(app): + asgi_version = 3 + else: + asgi_version = 2 + + if asgi_version == 3: + self.__call__ = self._run_asgi3 + elif asgi_version == 2: + self.__call__ = self._run_asgi2 # type: ignore def _capture_lifespan_exception(self, exc): # type: (Exception) -> None @@ -217,10 +222,6 @@ async def _run_app(self, scope, receive, send, asgi_version): source=transaction_source, origin=self.span_origin, ) - logger.debug( - "[ASGI] Created transaction (continuing trace): %s", - transaction, - ) else: transaction = Transaction( op=OP.HTTP_SERVER, @@ -228,17 +229,9 @@ async def _run_app(self, scope, receive, send, asgi_version): source=transaction_source, origin=self.span_origin, ) - logger.debug( - "[ASGI] Created transaction (new): %s", transaction - ) if transaction: transaction.set_tag("asgi.type", ty) - logger.debug( - "[ASGI] Set transaction name and source on transaction: '%s' / '%s'", - transaction.name, - transaction.source, - ) with ( sentry_sdk.start_transaction( @@ -248,7 +241,6 @@ async def _run_app(self, scope, receive, send, asgi_version): if transaction is not None else nullcontext() ): - logger.debug("[ASGI] Started transaction: %s", transaction) try: async def _sentry_wrapped_send(event): @@ -303,12 +295,6 @@ def event_processor(self, event, hint, asgi_scope): event["transaction"] = name event["transaction_info"] = {"source": source} - logger.debug( - "[ASGI] Set transaction name and source in event_processor: '%s' / '%s'", - event["transaction"], - event["transaction_info"]["source"], - ) - return event # Helper functions. 
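The asgi.py hunk above makes the ASGI protocol version an explicit, overridable argument instead of relying solely on `_looks_like_asgi3` detection. A minimal sketch, assuming a placeholder ASGI 3 app, of how a caller might pin the version:

from sentry_sdk.integrations.asgi import SentryAsgiMiddleware

async def app(scope, receive, send):
    # Bare-bones ASGI 3 application: a single awaitable callable.
    if scope["type"] == "http":
        await send({"type": "http.response.start", "status": 200, "headers": []})
        await send({"type": "http.response.body", "body": b"ok"})

# asgi_version=3 skips the _looks_like_asgi3() heuristic;
# leaving it at the default (None) keeps the previous auto-detection.
app = SentryAsgiMiddleware(app, asgi_version=3)
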
diff --git a/sentry_sdk/integrations/clickhouse_driver.py b/sentry_sdk/integrations/clickhouse_driver.py index 2561bfad04..bbaaaeec8e 100644 --- a/sentry_sdk/integrations/clickhouse_driver.py +++ b/sentry_sdk/integrations/clickhouse_driver.py @@ -11,7 +11,8 @@ # without introducing a hard dependency on `typing_extensions` # from: https://stackoverflow.com/a/71944042/300572 if TYPE_CHECKING: - from typing import ParamSpec, Callable + from collections.abc import Iterator + from typing import Any, ParamSpec, Callable else: # Fake ParamSpec class ParamSpec: @@ -49,9 +50,7 @@ def setup_once() -> None: ) # If the query contains parameters then the send_data function is used to send those parameters to clickhouse - clickhouse_driver.client.Client.send_data = _wrap_send_data( - clickhouse_driver.client.Client.send_data - ) + _wrap_send_data() # Every query ends either with the Client's `receive_end_of_query` (no result expected) # or its `receive_result` (result expected) @@ -128,23 +127,44 @@ def _inner_end(*args: P.args, **kwargs: P.kwargs) -> T: return _inner_end -def _wrap_send_data(f: Callable[P, T]) -> Callable[P, T]: - def _inner_send_data(*args: P.args, **kwargs: P.kwargs) -> T: - instance = args[0] # type: clickhouse_driver.client.Client - data = args[2] - span = getattr(instance.connection, "_sentry_span", None) +def _wrap_send_data() -> None: + original_send_data = clickhouse_driver.client.Client.send_data + + def _inner_send_data( # type: ignore[no-untyped-def] # clickhouse-driver does not type send_data + self, sample_block, data, types_check=False, columnar=False, *args, **kwargs + ): + span = getattr(self.connection, "_sentry_span", None) if span is not None: - _set_db_data(span, instance.connection) + _set_db_data(span, self.connection) if should_send_default_pii(): db_params = span._data.get("db.params", []) - db_params.extend(data) + + if isinstance(data, (list, tuple)): + db_params.extend(data) + + else: # data is a generic iterator + orig_data = data + + # Wrap the generator to add items to db.params as they are yielded. + # This allows us to send the params to Sentry without needing to allocate + # memory for the entire generator at once. + def wrapped_generator() -> "Iterator[Any]": + for item in orig_data: + db_params.append(item) + yield item + + # Replace the original iterator with the wrapped one. 
+ data = wrapped_generator() + span.set_data("db.params", db_params) - return f(*args, **kwargs) + return original_send_data( + self, sample_block, data, types_check, columnar, *args, **kwargs + ) - return _inner_send_data + clickhouse_driver.client.Client.send_data = _inner_send_data def _set_db_data( diff --git a/sentry_sdk/integrations/django/asgi.py b/sentry_sdk/integrations/django/asgi.py index 63a3f0b8f2..773c538045 100644 --- a/sentry_sdk/integrations/django/asgi.py +++ b/sentry_sdk/integrations/django/asgi.py @@ -155,7 +155,7 @@ async def sentry_patched_asgi_handler(self, receive, send): http_methods_to_capture=integration.http_methods_to_capture, ) - return await middleware(self.scope)(receive, send) + return await middleware(self.scope)(receive, send) # type: ignore cls.__call__ = sentry_patched_asgi_handler diff --git a/sentry_sdk/integrations/fastapi.py b/sentry_sdk/integrations/fastapi.py index 76c6adee0f..1473cbcab7 100644 --- a/sentry_sdk/integrations/fastapi.py +++ b/sentry_sdk/integrations/fastapi.py @@ -6,10 +6,7 @@ from sentry_sdk.integrations import DidNotEnable from sentry_sdk.scope import should_send_default_pii from sentry_sdk.tracing import SOURCE_FOR_STYLE, TransactionSource -from sentry_sdk.utils import ( - transaction_from_function, - logger, -) +from sentry_sdk.utils import transaction_from_function from typing import TYPE_CHECKING @@ -66,9 +63,6 @@ def _set_transaction_name_and_source(scope, transaction_style, request): source = SOURCE_FOR_STYLE[transaction_style] scope.set_transaction_name(name, source=source) - logger.debug( - "[FastAPI] Set transaction name and source on scope: %s / %s", name, source - ) def patch_get_request_handler(): diff --git a/sentry_sdk/integrations/gnu_backtrace.py b/sentry_sdk/integrations/gnu_backtrace.py index 21d8ea9b38..8241e27f13 100644 --- a/sentry_sdk/integrations/gnu_backtrace.py +++ b/sentry_sdk/integrations/gnu_backtrace.py @@ -11,13 +11,16 @@ from typing import Any from sentry_sdk._types import Event - -FUNCTION_RE = r"[^@]+?)\s+@\s+0x[0-9a-fA-F]+" +# function is everything between index at @ +# and then we match on the @ plus the hex val +FUNCTION_RE = r"[^@]+?" 
+HEX_ADDRESS = r"\s+@\s+0x[0-9a-fA-F]+" FRAME_RE = r""" -^(?P\d+)\.\s+(?P{FUNCTION_RE}\s+in\s+(?P.+)$ +^(?P\d+)\.\s+(?P{FUNCTION_RE}){HEX_ADDRESS}(?:\s+in\s+(?P.+))?$ """.format( FUNCTION_RE=FUNCTION_RE, + HEX_ADDRESS=HEX_ADDRESS, ) FRAME_RE = re.compile(FRAME_RE, re.MULTILINE | re.VERBOSE) diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py index 8b67c4c994..7e04a740ed 100644 --- a/sentry_sdk/integrations/langchain.py +++ b/sentry_sdk/integrations/langchain.py @@ -3,55 +3,59 @@ from functools import wraps import sentry_sdk -from sentry_sdk.ai.monitoring import set_ai_pipeline_name, record_token_usage -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.ai.monitoring import set_ai_pipeline_name from sentry_sdk.ai.utils import set_data_normalized +from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.tracing import Span -from sentry_sdk.integrations import DidNotEnable, Integration +from sentry_sdk.tracing_utils import _get_value from sentry_sdk.utils import logger, capture_internal_exceptions from typing import TYPE_CHECKING if TYPE_CHECKING: - from typing import Any, List, Callable, Dict, Union, Optional + from typing import ( + Any, + AsyncIterator, + Callable, + Dict, + Iterator, + List, + Optional, + Union, + ) from uuid import UUID + try: - from langchain_core.messages import BaseMessage - from langchain_core.outputs import LLMResult + from langchain.agents import AgentExecutor + from langchain_core.agents import AgentFinish from langchain_core.callbacks import ( - manager, BaseCallbackHandler, BaseCallbackManager, Callbacks, + manager, ) - from langchain_core.agents import AgentAction, AgentFinish + from langchain_core.messages import BaseMessage + from langchain_core.outputs import LLMResult + except ImportError: raise DidNotEnable("langchain not installed") DATA_FIELDS = { - "temperature": SPANDATA.AI_TEMPERATURE, - "top_p": SPANDATA.AI_TOP_P, - "top_k": SPANDATA.AI_TOP_K, - "function_call": SPANDATA.AI_FUNCTION_CALL, - "tool_calls": SPANDATA.AI_TOOL_CALLS, - "tools": SPANDATA.AI_TOOLS, - "response_format": SPANDATA.AI_RESPONSE_FORMAT, - "logit_bias": SPANDATA.AI_LOGIT_BIAS, - "tags": SPANDATA.AI_TAGS, + "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, + "function_call": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, + "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, + "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, + "tool_calls": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, + "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K, + "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, } -# To avoid double collecting tokens, we do *not* measure -# token counts for models for which we have an explicit integration -NO_COLLECT_TOKEN_MODELS = [ - "openai-chat", - "anthropic-chat", - "cohere-chat", - "huggingface_endpoint", -] - class LangchainIntegration(Integration): identifier = "langchain" @@ -60,25 +64,23 @@ class LangchainIntegration(Integration): # The most number of spans (e.g., LLM calls) that can be processed at the same time. 
max_spans = 1024 - def __init__( - self, include_prompts=True, max_spans=1024, tiktoken_encoding_name=None - ): - # type: (LangchainIntegration, bool, int, Optional[str]) -> None + def __init__(self, include_prompts=True, max_spans=1024): + # type: (LangchainIntegration, bool, int) -> None self.include_prompts = include_prompts self.max_spans = max_spans - self.tiktoken_encoding_name = tiktoken_encoding_name @staticmethod def setup_once(): # type: () -> None manager._configure = _wrap_configure(manager._configure) + if AgentExecutor is not None: + AgentExecutor.invoke = _wrap_agent_executor_invoke(AgentExecutor.invoke) + AgentExecutor.stream = _wrap_agent_executor_stream(AgentExecutor.stream) + class WatchedSpan: span = None # type: Span - num_completion_tokens = 0 # type: int - num_prompt_tokens = 0 # type: int - no_collect_tokens = False # type: bool children = [] # type: List[WatchedSpan] is_pipeline = False # type: bool @@ -88,26 +90,14 @@ def __init__(self, span): class SentryLangchainCallback(BaseCallbackHandler): # type: ignore[misc] - """Base callback handler that can be used to handle callbacks from langchain.""" + """Callback handler that creates Sentry spans.""" - def __init__(self, max_span_map_size, include_prompts, tiktoken_encoding_name=None): - # type: (int, bool, Optional[str]) -> None + def __init__(self, max_span_map_size, include_prompts): + # type: (int, bool) -> None self.span_map = OrderedDict() # type: OrderedDict[UUID, WatchedSpan] self.max_span_map_size = max_span_map_size self.include_prompts = include_prompts - self.tiktoken_encoding = None - if tiktoken_encoding_name is not None: - import tiktoken # type: ignore - - self.tiktoken_encoding = tiktoken.get_encoding(tiktoken_encoding_name) - - def count_tokens(self, s): - # type: (str) -> int - if self.tiktoken_encoding is not None: - return len(self.tiktoken_encoding.encode_ordinary(s)) - return 0 - def gc_span_map(self): # type: () -> None @@ -117,39 +107,37 @@ def gc_span_map(self): def _handle_error(self, run_id, error): # type: (UUID, Any) -> None - if not run_id or run_id not in self.span_map: - return + with capture_internal_exceptions(): + if not run_id or run_id not in self.span_map: + return - span_data = self.span_map[run_id] - if not span_data: - return - sentry_sdk.capture_exception(error, span_data.span.scope) - span_data.span.__exit__(None, None, None) - del self.span_map[run_id] + span_data = self.span_map[run_id] + span = span_data.span + span.set_status("unknown") + + sentry_sdk.capture_exception(error, span.scope) + + span.__exit__(None, None, None) + del self.span_map[run_id] def _normalize_langchain_message(self, message): # type: (BaseMessage) -> Any - parsed = {"content": message.content, "role": message.type} + parsed = {"role": message.type, "content": message.content} parsed.update(message.additional_kwargs) return parsed def _create_span(self, run_id, parent_id, **kwargs): # type: (SentryLangchainCallback, UUID, Optional[Any], Any) -> WatchedSpan - watched_span = None # type: Optional[WatchedSpan] if parent_id: parent_span = self.span_map.get(parent_id) # type: Optional[WatchedSpan] if parent_span: watched_span = WatchedSpan(parent_span.span.start_child(**kwargs)) parent_span.children.append(watched_span) + if watched_span is None: watched_span = WatchedSpan(sentry_sdk.start_span(**kwargs)) - if kwargs.get("op", "").startswith("ai.pipeline."): - if kwargs.get("name"): - set_ai_pipeline_name(kwargs.get("name")) - watched_span.is_pipeline = True - watched_span.span.__enter__() 
self.span_map[run_id] = watched_span self.gc_span_map() @@ -157,7 +145,6 @@ def _create_span(self, run_id, parent_id, **kwargs): def _exit_span(self, span_data, run_id): # type: (SentryLangchainCallback, WatchedSpan, UUID) -> None - if span_data.is_pipeline: set_ai_pipeline_name(None) @@ -180,21 +167,44 @@ def on_llm_start( with capture_internal_exceptions(): if not run_id: return + all_params = kwargs.get("invocation_params", {}) all_params.update(serialized.get("kwargs", {})) + + model = ( + all_params.get("model") + or all_params.get("model_name") + or all_params.get("model_id") + or "" + ) + watched_span = self._create_span( run_id, - kwargs.get("parent_run_id"), - op=OP.LANGCHAIN_RUN, + parent_run_id, + op=OP.GEN_AI_PIPELINE, name=kwargs.get("name") or "Langchain LLM call", origin=LangchainIntegration.origin, ) span = watched_span.span + + if model: + span.set_data( + SPANDATA.GEN_AI_REQUEST_MODEL, + model, + ) + + ai_type = all_params.get("_type", "") + if "anthropic" in ai_type: + span.set_data(SPANDATA.GEN_AI_SYSTEM, "anthropic") + elif "openai" in ai_type: + span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai") + + for key, attribute in DATA_FIELDS.items(): + if key in all_params and all_params[key] is not None: + set_data_normalized(span, attribute, all_params[key], unpack=False) + if should_send_default_pii() and self.include_prompts: - set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, prompts) - for k, v in DATA_FIELDS.items(): - if k in all_params: - set_data_normalized(span, v, all_params[k]) + set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts) def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs): # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any @@ -202,170 +212,150 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs): with capture_internal_exceptions(): if not run_id: return + all_params = kwargs.get("invocation_params", {}) all_params.update(serialized.get("kwargs", {})) + + model = ( + all_params.get("model") + or all_params.get("model_name") + or all_params.get("model_id") + or "" + ) + watched_span = self._create_span( run_id, kwargs.get("parent_run_id"), - op=OP.LANGCHAIN_CHAT_COMPLETIONS_CREATE, - name=kwargs.get("name") or "Langchain Chat Model", + op=OP.GEN_AI_CHAT, + name=f"chat {model}".strip(), origin=LangchainIntegration.origin, ) span = watched_span.span - model = all_params.get( - "model", all_params.get("model_name", all_params.get("model_id")) - ) - watched_span.no_collect_tokens = any( - x in all_params.get("_type", "") for x in NO_COLLECT_TOKEN_MODELS - ) - if not model and "anthropic" in all_params.get("_type"): - model = "claude-2" + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat") if model: - span.set_data(SPANDATA.AI_MODEL_ID, model) + span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model) + + ai_type = all_params.get("_type", "") + if "anthropic" in ai_type: + span.set_data(SPANDATA.GEN_AI_SYSTEM, "anthropic") + elif "openai" in ai_type: + span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai") + + for key, attribute in DATA_FIELDS.items(): + if key in all_params and all_params[key] is not None: + set_data_normalized(span, attribute, all_params[key], unpack=False) + if should_send_default_pii() and self.include_prompts: set_data_normalized( span, - SPANDATA.AI_INPUT_MESSAGES, + SPANDATA.GEN_AI_REQUEST_MESSAGES, [ [self._normalize_langchain_message(x) for x in list_] for list_ in messages ], ) - for k, v in DATA_FIELDS.items(): - if k in all_params: - 
set_data_normalized(span, v, all_params[k]) - if not watched_span.no_collect_tokens: - for list_ in messages: - for message in list_: - self.span_map[run_id].num_prompt_tokens += self.count_tokens( - message.content - ) + self.count_tokens(message.type) - - def on_llm_new_token(self, token, *, run_id, **kwargs): - # type: (SentryLangchainCallback, str, UUID, Any) -> Any - """Run on new LLM token. Only available when streaming is enabled.""" + + def on_chat_model_end(self, response, *, run_id, **kwargs): + # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any + """Run when Chat Model ends running.""" with capture_internal_exceptions(): if not run_id or run_id not in self.span_map: return + span_data = self.span_map[run_id] - if not span_data or span_data.no_collect_tokens: - return - span_data.num_completion_tokens += self.count_tokens(token) + span = span_data.span + + if should_send_default_pii() and self.include_prompts: + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_TEXT, + [[x.text for x in list_] for list_ in response.generations], + ) + + _record_token_usage(span, response) + self._exit_span(span_data, run_id) def on_llm_end(self, response, *, run_id, **kwargs): # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any """Run when LLM ends running.""" with capture_internal_exceptions(): - if not run_id: + if not run_id or run_id not in self.span_map: return - token_usage = ( - response.llm_output.get("token_usage") if response.llm_output else None - ) - span_data = self.span_map[run_id] - if not span_data: - return + span = span_data.span + + try: + generation = response.generations[0][0] + except IndexError: + generation = None + + if generation is not None: + try: + response_model = generation.generation_info.get("model_name") + if response_model is not None: + span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model) + except AttributeError: + pass + + try: + finish_reason = generation.generation_info.get("finish_reason") + if finish_reason is not None: + span.set_data( + SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason + ) + except AttributeError: + pass + + try: + tool_calls = getattr(generation.message, "tool_calls", None) + if tool_calls is not None and tool_calls != []: + set_data_normalized( + span, + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + tool_calls, + unpack=False, + ) + except AttributeError: + pass if should_send_default_pii() and self.include_prompts: set_data_normalized( - span_data.span, - SPANDATA.AI_RESPONSES, + span, + SPANDATA.GEN_AI_RESPONSE_TEXT, [[x.text for x in list_] for list_ in response.generations], ) - if not span_data.no_collect_tokens: - if token_usage: - record_token_usage( - span_data.span, - input_tokens=token_usage.get("prompt_tokens"), - output_tokens=token_usage.get("completion_tokens"), - total_tokens=token_usage.get("total_tokens"), - ) - else: - record_token_usage( - span_data.span, - input_tokens=span_data.num_prompt_tokens, - output_tokens=span_data.num_completion_tokens, - ) - + _record_token_usage(span, response) self._exit_span(span_data, run_id) def on_llm_error(self, error, *, run_id, **kwargs): # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any """Run when LLM errors.""" - with capture_internal_exceptions(): - self._handle_error(run_id, error) - - def on_chain_start(self, serialized, inputs, *, run_id, **kwargs): - # type: (SentryLangchainCallback, Dict[str, Any], Dict[str, Any], UUID, Any) -> Any - """Run when chain starts running.""" - with 
capture_internal_exceptions(): - if not run_id: - return - watched_span = self._create_span( - run_id, - kwargs.get("parent_run_id"), - op=( - OP.LANGCHAIN_RUN - if kwargs.get("parent_run_id") is not None - else OP.LANGCHAIN_PIPELINE - ), - name=kwargs.get("name") or "Chain execution", - origin=LangchainIntegration.origin, - ) - metadata = kwargs.get("metadata") - if metadata: - set_data_normalized(watched_span.span, SPANDATA.AI_METADATA, metadata) - - def on_chain_end(self, outputs, *, run_id, **kwargs): - # type: (SentryLangchainCallback, Dict[str, Any], UUID, Any) -> Any - """Run when chain ends running.""" - with capture_internal_exceptions(): - if not run_id or run_id not in self.span_map: - return - - span_data = self.span_map[run_id] - if not span_data: - return - self._exit_span(span_data, run_id) + self._handle_error(run_id, error) - def on_chain_error(self, error, *, run_id, **kwargs): + def on_chat_model_error(self, error, *, run_id, **kwargs): # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any - """Run when chain errors.""" + """Run when Chat Model errors.""" self._handle_error(run_id, error) - def on_agent_action(self, action, *, run_id, **kwargs): - # type: (SentryLangchainCallback, AgentAction, UUID, Any) -> Any - with capture_internal_exceptions(): - if not run_id: - return - watched_span = self._create_span( - run_id, - kwargs.get("parent_run_id"), - op=OP.LANGCHAIN_AGENT, - name=action.tool or "AI tool usage", - origin=LangchainIntegration.origin, - ) - if action.tool_input and should_send_default_pii() and self.include_prompts: - set_data_normalized( - watched_span.span, SPANDATA.AI_INPUT_MESSAGES, action.tool_input - ) - def on_agent_finish(self, finish, *, run_id, **kwargs): # type: (SentryLangchainCallback, AgentFinish, UUID, Any) -> Any with capture_internal_exceptions(): - if not run_id: + if not run_id or run_id not in self.span_map: return span_data = self.span_map[run_id] - if not span_data: - return + span = span_data.span + if should_send_default_pii() and self.include_prompts: set_data_normalized( - span_data.span, SPANDATA.AI_RESPONSES, finish.return_values.items() + span, + SPANDATA.GEN_AI_RESPONSE_TEXT, + finish.return_values.items(), ) + self._exit_span(span_data, run_id) def on_tool_start(self, serialized, input_str, *, run_id, **kwargs): @@ -374,23 +364,31 @@ def on_tool_start(self, serialized, input_str, *, run_id, **kwargs): with capture_internal_exceptions(): if not run_id: return + + tool_name = serialized.get("name") or kwargs.get("name") or "" + watched_span = self._create_span( run_id, kwargs.get("parent_run_id"), - op=OP.LANGCHAIN_TOOL, - name=serialized.get("name") or kwargs.get("name") or "AI tool usage", + op=OP.GEN_AI_EXECUTE_TOOL, + name=f"execute_tool {tool_name}".strip(), origin=LangchainIntegration.origin, ) + span = watched_span.span + + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool") + span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name) + + tool_description = serialized.get("description") + if tool_description is not None: + span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_description) + if should_send_default_pii() and self.include_prompts: set_data_normalized( - watched_span.span, - SPANDATA.AI_INPUT_MESSAGES, + span, + SPANDATA.GEN_AI_TOOL_INPUT, kwargs.get("inputs", [input_str]), ) - if kwargs.get("metadata"): - set_data_normalized( - watched_span.span, SPANDATA.AI_METADATA, kwargs.get("metadata") - ) def on_tool_end(self, output, *, run_id, **kwargs): # type: 
(SentryLangchainCallback, str, UUID, Any) -> Any @@ -400,10 +398,11 @@ def on_tool_end(self, output, *, run_id, **kwargs): return span_data = self.span_map[run_id] - if not span_data: - return + span = span_data.span + if should_send_default_pii() and self.include_prompts: - set_data_normalized(span_data.span, SPANDATA.AI_RESPONSES, output) + set_data_normalized(span, SPANDATA.GEN_AI_TOOL_OUTPUT, output) + self._exit_span(span_data, run_id) def on_tool_error(self, error, *args, run_id, **kwargs): @@ -412,6 +411,126 @@ def on_tool_error(self, error, *args, run_id, **kwargs): self._handle_error(run_id, error) +def _extract_tokens(token_usage): + # type: (Any) -> tuple[Optional[int], Optional[int], Optional[int]] + if not token_usage: + return None, None, None + + input_tokens = _get_value(token_usage, "prompt_tokens") or _get_value( + token_usage, "input_tokens" + ) + output_tokens = _get_value(token_usage, "completion_tokens") or _get_value( + token_usage, "output_tokens" + ) + total_tokens = _get_value(token_usage, "total_tokens") + + return input_tokens, output_tokens, total_tokens + + +def _extract_tokens_from_generations(generations): + # type: (Any) -> tuple[Optional[int], Optional[int], Optional[int]] + """Extract token usage from response.generations structure.""" + if not generations: + return None, None, None + + total_input = 0 + total_output = 0 + total_total = 0 + + for gen_list in generations: + for gen in gen_list: + token_usage = _get_token_usage(gen) + input_tokens, output_tokens, total_tokens = _extract_tokens(token_usage) + total_input += input_tokens if input_tokens is not None else 0 + total_output += output_tokens if output_tokens is not None else 0 + total_total += total_tokens if total_tokens is not None else 0 + + return ( + total_input if total_input > 0 else None, + total_output if total_output > 0 else None, + total_total if total_total > 0 else None, + ) + + +def _get_token_usage(obj): + # type: (Any) -> Optional[Dict[str, Any]] + """ + Check multiple paths to extract token usage from different objects. + """ + possible_names = ("usage", "token_usage", "usage_metadata") + + message = _get_value(obj, "message") + if message is not None: + for name in possible_names: + usage = _get_value(message, name) + if usage is not None: + return usage + + llm_output = _get_value(obj, "llm_output") + if llm_output is not None: + for name in possible_names: + usage = _get_value(llm_output, name) + if usage is not None: + return usage + + # check for usage in the object itself + for name in possible_names: + usage = _get_value(obj, name) + if usage is not None: + return usage + + # no usage found anywhere + return None + + +def _record_token_usage(span, response): + # type: (Span, Any) -> None + token_usage = _get_token_usage(response) + if token_usage: + input_tokens, output_tokens, total_tokens = _extract_tokens(token_usage) + else: + input_tokens, output_tokens, total_tokens = _extract_tokens_from_generations( + response.generations + ) + + if input_tokens is not None: + span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens) + + if output_tokens is not None: + span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens) + + if total_tokens is not None: + span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens) + + +def _get_request_data(obj, args, kwargs): + # type: (Any, Any, Any) -> tuple[Optional[str], Optional[List[Any]]] + """ + Get the agent name and available tools for the agent. 
+ """ + agent = getattr(obj, "agent", None) + runnable = getattr(agent, "runnable", None) + runnable_config = getattr(runnable, "config", {}) + tools = ( + getattr(obj, "tools", None) + or getattr(agent, "tools", None) + or runnable_config.get("tools") + or runnable_config.get("available_tools") + ) + tools = tools if tools and len(tools) > 0 else None + + try: + agent_name = None + if len(args) > 1: + agent_name = args[1].get("run_name") + if agent_name is None: + agent_name = runnable_config.get("run_name") + except Exception: + pass + + return (agent_name, tools) + + def _wrap_configure(f): # type: (Callable[..., Any]) -> Callable[..., Any] @@ -473,7 +592,6 @@ def new_configure( sentry_handler = SentryLangchainCallback( integration.max_spans, integration.include_prompts, - integration.tiktoken_encoding_name, ) if isinstance(local_callbacks, BaseCallbackManager): local_callbacks = local_callbacks.copy() @@ -495,3 +613,158 @@ def new_configure( ) return new_configure + + +def _wrap_agent_executor_invoke(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + + @wraps(f) + def new_invoke(self, *args, **kwargs): + # type: (Any, Any, Any) -> Any + integration = sentry_sdk.get_client().get_integration(LangchainIntegration) + if integration is None: + return f(self, *args, **kwargs) + + agent_name, tools = _get_request_data(self, args, kwargs) + + with sentry_sdk.start_span( + op=OP.GEN_AI_INVOKE_AGENT, + name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent", + origin=LangchainIntegration.origin, + ) as span: + if agent_name: + span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name) + + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False) + + if tools: + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False + ) + + # Run the agent + result = f(self, *args, **kwargs) + + input = result.get("input") + if ( + input is not None + and should_send_default_pii() + and integration.include_prompts + ): + set_data_normalized( + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + [ + input, + ], + ) + + output = result.get("output") + if ( + output is not None + and should_send_default_pii() + and integration.include_prompts + ): + span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output) + + return result + + return new_invoke + + +def _wrap_agent_executor_stream(f): + # type: (Callable[..., Any]) -> Callable[..., Any] + + @wraps(f) + def new_stream(self, *args, **kwargs): + # type: (Any, Any, Any) -> Any + integration = sentry_sdk.get_client().get_integration(LangchainIntegration) + if integration is None: + return f(self, *args, **kwargs) + + agent_name, tools = _get_request_data(self, args, kwargs) + + span = sentry_sdk.start_span( + op=OP.GEN_AI_INVOKE_AGENT, + name=f"invoke_agent {agent_name}".strip(), + origin=LangchainIntegration.origin, + ) + span.__enter__() + + if agent_name: + span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name) + + span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + + if tools: + set_data_normalized( + span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools, unpack=False + ) + + input = args[0].get("input") if len(args) >= 1 else None + if ( + input is not None + and should_send_default_pii() + and integration.include_prompts + ): + set_data_normalized( + span, + SPANDATA.GEN_AI_REQUEST_MESSAGES, + [ + input, + ], + ) + + # Run the agent + result = f(self, *args, **kwargs) + + old_iterator = result + + 
def new_iterator(): + # type: () -> Iterator[Any] + for event in old_iterator: + yield event + + try: + output = event.get("output") + except Exception: + output = None + + if ( + output is not None + and should_send_default_pii() + and integration.include_prompts + ): + span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output) + + span.__exit__(None, None, None) + + async def new_iterator_async(): + # type: () -> AsyncIterator[Any] + async for event in old_iterator: + yield event + + try: + output = event.get("output") + except Exception: + output = None + + if ( + output is not None + and should_send_default_pii() + and integration.include_prompts + ): + span.set_data(SPANDATA.GEN_AI_RESPONSE_TEXT, output) + + span.__exit__(None, None, None) + + if str(type(result)) == "<class 'async_generator'>": + result = new_iterator_async() + else: + result = new_iterator() + + return result + + return new_stream diff --git a/sentry_sdk/integrations/litestar.py b/sentry_sdk/integrations/litestar.py index 4e15081cba..2be4d376e0 100644 --- a/sentry_sdk/integrations/litestar.py +++ b/sentry_sdk/integrations/litestar.py @@ -85,6 +85,7 @@ def __init__(self, app, span_origin=LitestarIntegration.origin): transaction_style="endpoint", mechanism_type="asgi", span_origin=span_origin, + asgi_version=3, ) def _capture_request_exception(self, exc): @@ -116,7 +117,6 @@ def injection_wrapper(self, *args, **kwargs): *(kwargs.get("after_exception") or []), ] - SentryLitestarASGIMiddleware.__call__ = SentryLitestarASGIMiddleware._run_asgi3 # type: ignore middleware = kwargs.get("middleware") or [] kwargs["middleware"] = [SentryLitestarASGIMiddleware, *middleware] old__init__(self, *args, **kwargs) diff --git a/sentry_sdk/integrations/logging.py b/sentry_sdk/integrations/logging.py index a50512f622..15ff2ed233 100644 --- a/sentry_sdk/integrations/logging.py +++ b/sentry_sdk/integrations/logging.py @@ -12,6 +12,7 @@ event_from_exception, current_stacktrace, capture_internal_exceptions, + has_logs_enabled, ) from sentry_sdk.integrations import Integration @@ -344,7 +345,7 @@ def emit(self, record): if not client.is_active(): return - if not client.options["_experiments"].get("enable_logs", False): + if not has_logs_enabled(client.options): return self._capture_log_from_record(client, record) diff --git a/sentry_sdk/integrations/loguru.py b/sentry_sdk/integrations/loguru.py index df3ecf161a..b910b9a407 100644 --- a/sentry_sdk/integrations/loguru.py +++ b/sentry_sdk/integrations/loguru.py @@ -8,6 +8,7 @@ _BaseHandler, ) from sentry_sdk.logger import _log_level_to_otel +from sentry_sdk.utils import has_logs_enabled from typing import TYPE_CHECKING @@ -151,7 +152,7 @@ def loguru_sentry_logs_handler(message): if not client.is_active(): return - if not client.options["_experiments"].get("enable_logs", False): + if not has_logs_enabled(client.options): return record = message.record diff --git a/sentry_sdk/integrations/openai.py b/sentry_sdk/integrations/openai.py index 78fcdd49e2..187f795807 100644 --- a/sentry_sdk/integrations/openai.py +++ b/sentry_sdk/integrations/openai.py @@ -20,6 +20,11 @@ from sentry_sdk.tracing import Span try: + try: + from openai import NOT_GIVEN + except ImportError: + NOT_GIVEN = None + from openai.resources.chat.completions import Completions, AsyncCompletions from openai.resources import Embeddings, AsyncEmbeddings @@ -192,12 +197,13 @@ def _set_input_data(span, kwargs, operation, integration): } for key, attribute in kwargs_keys_to_attributes.items(): value = kwargs.get(key) - if value is not None: + + if value is not
NOT_GIVEN and value is not None: set_data_normalized(span, attribute, value) # Input attributes: Tools tools = kwargs.get("tools") - if tools is not None and len(tools) > 0: + if tools is not NOT_GIVEN and tools is not None and len(tools) > 0: set_data_normalized( span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) ) diff --git a/sentry_sdk/integrations/openfeature.py b/sentry_sdk/integrations/openfeature.py index e2b33d83f2..3ac73edd93 100644 --- a/sentry_sdk/integrations/openfeature.py +++ b/sentry_sdk/integrations/openfeature.py @@ -1,4 +1,4 @@ -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any from sentry_sdk.feature_flags import add_feature_flag from sentry_sdk.integrations import DidNotEnable, Integration @@ -8,7 +8,6 @@ from openfeature.hook import Hook if TYPE_CHECKING: - from openfeature.flag_evaluation import FlagEvaluationDetails from openfeature.hook import HookContext, HookHints except ImportError: raise DidNotEnable("OpenFeature is not installed") @@ -25,9 +24,8 @@ def setup_once(): class OpenFeatureHook(Hook): - def after(self, hook_context, details, hints): - # type: (HookContext, FlagEvaluationDetails[bool], HookHints) -> None + # type: (Any, Any, Any) -> None if isinstance(details.value, bool): add_feature_flag(details.flag_key, details.value) diff --git a/sentry_sdk/integrations/quart.py b/sentry_sdk/integrations/quart.py index 51306bb4cd..64f7e0bcd2 100644 --- a/sentry_sdk/integrations/quart.py +++ b/sentry_sdk/integrations/quart.py @@ -95,8 +95,8 @@ async def sentry_patched_asgi_app(self, scope, receive, send): middleware = SentryAsgiMiddleware( lambda *a, **kw: old_app(self, *a, **kw), span_origin=QuartIntegration.origin, + asgi_version=3, ) - middleware.__call__ = middleware._run_asgi3 return await middleware(scope, receive, send) Quart.__call__ = sentry_patched_asgi_app diff --git a/sentry_sdk/integrations/starlette.py b/sentry_sdk/integrations/starlette.py index d0f0bf2045..c7ce40618b 100644 --- a/sentry_sdk/integrations/starlette.py +++ b/sentry_sdk/integrations/starlette.py @@ -29,7 +29,6 @@ capture_internal_exceptions, ensure_integration_enabled, event_from_exception, - logger, parse_version, transaction_from_function, ) @@ -403,9 +402,9 @@ async def _sentry_patched_asgi_app(self, scope, receive, send): if integration else DEFAULT_HTTP_METHODS_TO_CAPTURE ), + asgi_version=3, ) - middleware.__call__ = middleware._run_asgi3 return await middleware(scope, receive, send) Starlette.__call__ = _sentry_patched_asgi_app @@ -723,9 +722,6 @@ def _set_transaction_name_and_source(scope, transaction_style, request): source = TransactionSource.ROUTE scope.set_transaction_name(name, source=source) - logger.debug( - "[Starlette] Set transaction name and source on scope: %s / %s", name, source - ) def _get_transaction_from_middleware(app, asgi_scope, integration): diff --git a/sentry_sdk/integrations/starlite.py b/sentry_sdk/integrations/starlite.py index 24707a18b1..b402aa2184 100644 --- a/sentry_sdk/integrations/starlite.py +++ b/sentry_sdk/integrations/starlite.py @@ -65,6 +65,7 @@ def __init__(self, app, span_origin=StarliteIntegration.origin): transaction_style="endpoint", mechanism_type="asgi", span_origin=span_origin, + asgi_version=3, ) @@ -94,7 +95,6 @@ def injection_wrapper(self, *args, **kwargs): ] ) - SentryStarliteASGIMiddleware.__call__ = SentryStarliteASGIMiddleware._run_asgi3 # type: ignore middleware = kwargs.get("middleware") or [] kwargs["middleware"] = [SentryStarliteASGIMiddleware, *middleware] old__init__(self, *args, 
**kwargs) diff --git a/sentry_sdk/scope.py b/sentry_sdk/scope.py index 73bf43573e..3356de57a8 100644 --- a/sentry_sdk/scope.py +++ b/sentry_sdk/scope.py @@ -48,7 +48,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: - from collections.abc import Mapping, MutableMapping + from collections.abc import Mapping from typing import Any from typing import Callable @@ -238,24 +238,24 @@ def __copy__(self): rv._name = self._name rv._fingerprint = self._fingerprint rv._transaction = self._transaction - rv._transaction_info = dict(self._transaction_info) + rv._transaction_info = self._transaction_info.copy() rv._user = self._user - rv._tags = dict(self._tags) - rv._contexts = dict(self._contexts) - rv._extras = dict(self._extras) + rv._tags = self._tags.copy() + rv._contexts = self._contexts.copy() + rv._extras = self._extras.copy() rv._breadcrumbs = copy(self._breadcrumbs) - rv._n_breadcrumbs_truncated = copy(self._n_breadcrumbs_truncated) - rv._event_processors = list(self._event_processors) - rv._error_processors = list(self._error_processors) + rv._n_breadcrumbs_truncated = self._n_breadcrumbs_truncated + rv._event_processors = self._event_processors.copy() + rv._error_processors = self._error_processors.copy() rv._propagation_context = self._propagation_context rv._should_capture = self._should_capture rv._span = self._span rv._session = self._session rv._force_auto_session_tracking = self._force_auto_session_tracking - rv._attachments = list(self._attachments) + rv._attachments = self._attachments.copy() rv._profile = self._profile @@ -683,12 +683,12 @@ def clear(self): self._level = None # type: Optional[LogLevelStr] self._fingerprint = None # type: Optional[List[str]] self._transaction = None # type: Optional[str] - self._transaction_info = {} # type: MutableMapping[str, str] + self._transaction_info = {} # type: dict[str, str] self._user = None # type: Optional[Dict[str, Any]] self._tags = {} # type: Dict[str, Any] self._contexts = {} # type: Dict[str, Dict[str, Any]] - self._extras = {} # type: MutableMapping[str, Any] + self._extras = {} # type: dict[str, Any] self._attachments = [] # type: List[Attachment] self.clear_breadcrumbs() diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py index fc40221b9f..c9b357305a 100644 --- a/sentry_sdk/tracing.py +++ b/sentry_sdk/tracing.py @@ -5,7 +5,7 @@ from enum import Enum import sentry_sdk -from sentry_sdk.consts import INSTRUMENTER, SPANSTATUS, SPANDATA +from sentry_sdk.consts import INSTRUMENTER, SPANSTATUS, SPANDATA, SPANTEMPLATE from sentry_sdk.profiler.continuous_profiler import get_profiler_id from sentry_sdk.utils import ( get_current_thread_meta, @@ -257,8 +257,8 @@ class Span: """ __slots__ = ( - "trace_id", - "span_id", + "_trace_id", + "_span_id", "parent_span_id", "same_process_as_parent", "sampled", @@ -301,8 +301,8 @@ def __init__( name=None, # type: Optional[str] ): # type: (...) 
-> None - self.trace_id = trace_id or uuid.uuid4().hex - self.span_id = span_id or uuid.uuid4().hex[16:] + self._trace_id = trace_id + self._span_id = span_id self.parent_span_id = parent_span_id self.same_process_as_parent = same_process_as_parent self.sampled = sampled @@ -356,6 +356,32 @@ def init_span_recorder(self, maxlen): if self._span_recorder is None: self._span_recorder = _SpanRecorder(maxlen) + @property + def trace_id(self): + # type: () -> str + if not self._trace_id: + self._trace_id = uuid.uuid4().hex + + return self._trace_id + + @trace_id.setter + def trace_id(self, value): + # type: (str) -> None + self._trace_id = value + + @property + def span_id(self): + # type: () -> str + if not self._span_id: + self._span_id = uuid.uuid4().hex[16:] + + return self._span_id + + @span_id.setter + def span_id(self, value): + # type: (str) -> None + self._span_id = value + def _get_local_aggregator(self): # type: (...) -> LocalAggregator rv = self._local_aggregator @@ -602,6 +628,10 @@ def set_data(self, key, value): # type: (str, Any) -> None self._data[key] = value + def update_data(self, data): + # type: (Dict[str, Any]) -> None + self._data.update(data) + def set_flag(self, flag, result): # type: (str, bool) -> None if len(self._flags) < self._flags_capacity: @@ -818,7 +848,6 @@ def __init__( # type: ignore[misc] **kwargs, # type: Unpack[SpanKwargs] ): # type: (...) -> None - super().__init__(**kwargs) self.name = name @@ -1275,6 +1304,10 @@ def set_data(self, key, value): # type: (str, Any) -> None pass + def update_data(self, data): + # type: (Dict[str, Any]) -> None + pass + def set_status(self, value): # type: (str) -> None pass @@ -1332,43 +1365,103 @@ def _set_initial_sampling_decision(self, sampling_context): if TYPE_CHECKING: @overload - def trace(func=None): - # type: (None) -> Callable[[Callable[P, R]], Callable[P, R]] + def trace( + func=None, *, op=None, name=None, attributes=None, template=SPANTEMPLATE.DEFAULT + ): + # type: (None, Optional[str], Optional[str], Optional[dict[str, Any]], SPANTEMPLATE) -> Callable[[Callable[P, R]], Callable[P, R]] + # Handles: @trace() and @trace(op="custom") pass @overload def trace(func): # type: (Callable[P, R]) -> Callable[P, R] + # Handles: @trace pass -def trace(func=None): - # type: (Optional[Callable[P, R]]) -> Union[Callable[P, R], Callable[[Callable[P, R]], Callable[P, R]]] +def trace( + func=None, *, op=None, name=None, attributes=None, template=SPANTEMPLATE.DEFAULT +): + # type: (Optional[Callable[P, R]], Optional[str], Optional[str], Optional[dict[str, Any]], SPANTEMPLATE) -> Union[Callable[P, R], Callable[[Callable[P, R]], Callable[P, R]]] """ - Decorator to start a child span under the existing current transaction. - If there is no current transaction, then nothing will be traced. - - .. code-block:: - :caption: Usage + Decorator to start a child span around a function call. + + This decorator automatically creates a new span when the decorated function + is called, and finishes the span when the function returns or raises an exception. + + :param func: The function to trace. When used as a decorator without parentheses, + this is the function being decorated. When used with parameters (e.g., + ``@trace(op="custom")``, this should be None. + :type func: Callable or None + + :param op: The operation name for the span. This is a high-level description + of what the span represents (e.g., "http.client", "db.query"). + You can use predefined constants from :py:class:`sentry_sdk.consts.OP` + or provide your own string. 
If not provided, a default operation will + be assigned based on the template. + :type op: str or None + + :param name: The human-readable name/description for the span. If not provided, + defaults to the function name. This provides more specific details about + what the span represents (e.g., "GET /api/users", "process_user_data"). + :type name: str or None + + :param attributes: A dictionary of key-value pairs to add as attributes to the span. + Attribute values must be strings, integers, floats, or booleans. These + attributes provide additional context about the span's execution. + :type attributes: dict[str, Any] or None + + :param template: The type of span to create. This determines what kind of + span instrumentation and data collection will be applied. Use predefined + constants from :py:class:`sentry_sdk.consts.SPANTEMPLATE`. + The default is `SPANTEMPLATE.DEFAULT` which is the right choice for most + use cases. + :type template: :py:class:`sentry_sdk.consts.SPANTEMPLATE` + + :returns: When used as ``@trace``, returns the decorated function. When used as + ``@trace(...)`` with parameters, returns a decorator function. + :rtype: Callable or decorator function + + Example:: import sentry_sdk + from sentry_sdk.consts import OP, SPANTEMPLATE + # Simple usage with default values @sentry_sdk.trace - def my_function(): - ... + def process_data(): + # Function implementation + pass - @sentry_sdk.trace - async def my_async_function(): - ... + # With custom parameters + @sentry_sdk.trace( + op=OP.DB_QUERY, + name="Get user data", + attributes={"postgres": True} + ) + def make_db_query(sql): + # Function implementation + pass + + # With a custom template + @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) + def calculate_interest_rate(amount, rate, years): + # Function implementation + pass """ - from sentry_sdk.tracing_utils import start_child_span_decorator + from sentry_sdk.tracing_utils import create_span_decorator + + decorator = create_span_decorator( + op=op, + name=name, + attributes=attributes, + template=template, + ) - # This patterns allows usage of both @sentry_traced and @sentry_traced(...) 
- # See https://stackoverflow.com/questions/52126071/decorator-with-arguments-avoid-parenthesis-when-no-arguments/52126278 if func: - return start_child_span_decorator(func) + return decorator(func) else: - return start_child_span_decorator + return decorator # Circular imports diff --git a/sentry_sdk/tracing_utils.py b/sentry_sdk/tracing_utils.py index 552f4fd59a..b31d3d85c5 100644 --- a/sentry_sdk/tracing_utils.py +++ b/sentry_sdk/tracing_utils.py @@ -1,4 +1,5 @@ import contextlib +import functools import inspect import os import re @@ -6,13 +7,12 @@ from collections.abc import Mapping from datetime import timedelta from decimal import ROUND_DOWN, Decimal, DefaultContext, localcontext -from functools import wraps from random import Random from urllib.parse import quote, unquote import uuid import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP, SPANDATA, SPANTEMPLATE from sentry_sdk.utils import ( capture_internal_exceptions, filename_for_module, @@ -20,6 +20,7 @@ logger, match_regex_list, qualname_from_function, + safe_repr, to_string, try_convert, is_sentry_url, @@ -770,70 +771,116 @@ def normalize_incoming_data(incoming_data): return data -def start_child_span_decorator(func): - # type: (Any) -> Any +def create_span_decorator( + op=None, name=None, attributes=None, template=SPANTEMPLATE.DEFAULT +): + # type: (Optional[Union[str, OP]], Optional[str], Optional[dict[str, Any]], SPANTEMPLATE) -> Any """ - Decorator to add child spans for functions. - - See also ``sentry_sdk.tracing.trace()``. + Create a span decorator that can wrap both sync and async functions. + + :param op: The operation type for the span. + :type op: str or :py:class:`sentry_sdk.consts.OP` or None + :param name: The name of the span. + :type name: str or None + :param attributes: Additional attributes to set on the span. + :type attributes: dict or None + :param template: The type of span to create. This determines what kind of + span instrumentation and data collection will be applied. Use predefined + constants from :py:class:`sentry_sdk.consts.SPANTEMPLATE`. + The default is `SPANTEMPLATE.DEFAULT` which is the right choice for most + use cases. + :type template: :py:class:`sentry_sdk.consts.SPANTEMPLATE` """ - # Asynchronous case - if inspect.iscoroutinefunction(func): + from sentry_sdk.scope import should_send_default_pii - @wraps(func) - async def func_with_tracing(*args, **kwargs): - # type: (*Any, **Any) -> Any + def span_decorator(f): + # type: (Any) -> Any + """ + Decorator to create a span for the given function. + """ - span = get_current_span() + @functools.wraps(f) + async def async_wrapper(*args, **kwargs): + # type: (*Any, **Any) -> Any + current_span = get_current_span() - if span is None: + if current_span is None: logger.debug( "Cannot create a child span for %s. 
" "Please start a Sentry transaction before calling this function.", - qualname_from_function(func), + qualname_from_function(f), + ) + return await f(*args, **kwargs) + + span_op = op or _get_span_op(template) + function_name = name or qualname_from_function(f) or "" + span_name = _get_span_name(template, function_name, kwargs) + send_pii = should_send_default_pii() + + with current_span.start_child( + op=span_op, + name=span_name, + ) as span: + span.update_data(attributes or {}) + _set_input_attributes( + span, template, send_pii, function_name, f, args, kwargs ) - return await func(*args, **kwargs) - with span.start_child( - op=OP.FUNCTION, - name=qualname_from_function(func), - ): - return await func(*args, **kwargs) + result = await f(*args, **kwargs) + + _set_output_attributes(span, template, send_pii, result) + + return result try: - func_with_tracing.__signature__ = inspect.signature(func) # type: ignore[attr-defined] + async_wrapper.__signature__ = inspect.signature(f) # type: ignore[attr-defined] except Exception: pass - # Synchronous case - else: - - @wraps(func) - def func_with_tracing(*args, **kwargs): + @functools.wraps(f) + def sync_wrapper(*args, **kwargs): # type: (*Any, **Any) -> Any + current_span = get_current_span() - span = get_current_span() - - if span is None: + if current_span is None: logger.debug( "Cannot create a child span for %s. " "Please start a Sentry transaction before calling this function.", - qualname_from_function(func), + qualname_from_function(f), + ) + return f(*args, **kwargs) + + span_op = op or _get_span_op(template) + function_name = name or qualname_from_function(f) or "" + span_name = _get_span_name(template, function_name, kwargs) + send_pii = should_send_default_pii() + + with current_span.start_child( + op=span_op, + name=span_name, + ) as span: + span.update_data(attributes or {}) + _set_input_attributes( + span, template, send_pii, function_name, f, args, kwargs ) - return func(*args, **kwargs) - with span.start_child( - op=OP.FUNCTION, - name=qualname_from_function(func), - ): - return func(*args, **kwargs) + result = f(*args, **kwargs) + + _set_output_attributes(span, template, send_pii, result) + + return result try: - func_with_tracing.__signature__ = inspect.signature(func) # type: ignore[attr-defined] + sync_wrapper.__signature__ = inspect.signature(f) # type: ignore[attr-defined] except Exception: pass - return func_with_tracing + if inspect.iscoroutinefunction(f): + return async_wrapper + else: + return sync_wrapper + + return span_decorator def get_current_span(scope=None): @@ -896,6 +943,241 @@ def _sample_rand_range(parent_sampled, sample_rate): return sample_rate, 1.0 +def _get_value(source, key): + # type: (Any, str) -> Optional[Any] + """ + Gets a value from a source object. The source can be a dict or an object. + It is checked for dictionary keys and object attributes. + """ + value = None + if isinstance(source, dict): + value = source.get(key) + else: + if hasattr(source, key): + try: + value = getattr(source, key) + except Exception: + value = None + return value + + +def _get_span_name(template, name, kwargs=None): + # type: (Union[str, SPANTEMPLATE], str, Optional[dict[str, Any]]) -> str + """ + Get the name of the span based on the template and the name. 
+ """ + span_name = name + + if template == SPANTEMPLATE.AI_CHAT: + model = None + if kwargs: + for key in ("model", "model_name"): + if kwargs.get(key) and isinstance(kwargs[key], str): + model = kwargs[key] + break + + span_name = f"chat {model}" if model else "chat" + + elif template == SPANTEMPLATE.AI_AGENT: + span_name = f"invoke_agent {name}" + + elif template == SPANTEMPLATE.AI_TOOL: + span_name = f"execute_tool {name}" + + return span_name + + +def _get_span_op(template): + # type: (Union[str, SPANTEMPLATE]) -> str + """ + Get the operation of the span based on the template. + """ + mapping = { + SPANTEMPLATE.AI_CHAT: OP.GEN_AI_CHAT, + SPANTEMPLATE.AI_AGENT: OP.GEN_AI_INVOKE_AGENT, + SPANTEMPLATE.AI_TOOL: OP.GEN_AI_EXECUTE_TOOL, + } # type: dict[Union[str, SPANTEMPLATE], Union[str, OP]] + op = mapping.get(template, OP.FUNCTION) + + return str(op) + + +def _get_input_attributes(template, send_pii, args, kwargs): + # type: (Union[str, SPANTEMPLATE], bool, tuple[Any, ...], dict[str, Any]) -> dict[str, Any] + """ + Get input attributes for the given span template. + """ + attributes = {} # type: dict[str, Any] + + if template in [SPANTEMPLATE.AI_AGENT, SPANTEMPLATE.AI_TOOL, SPANTEMPLATE.AI_CHAT]: + mapping = { + "model": (SPANDATA.GEN_AI_REQUEST_MODEL, str), + "model_name": (SPANDATA.GEN_AI_REQUEST_MODEL, str), + "agent": (SPANDATA.GEN_AI_AGENT_NAME, str), + "agent_name": (SPANDATA.GEN_AI_AGENT_NAME, str), + "max_tokens": (SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, int), + "frequency_penalty": (SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, float), + "presence_penalty": (SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, float), + "temperature": (SPANDATA.GEN_AI_REQUEST_TEMPERATURE, float), + "top_p": (SPANDATA.GEN_AI_REQUEST_TOP_P, float), + "top_k": (SPANDATA.GEN_AI_REQUEST_TOP_K, int), + } + + def _set_from_key(key, value): + # type: (str, Any) -> None + if key in mapping: + (attribute, data_type) = mapping[key] + if value is not None and isinstance(value, data_type): + attributes[attribute] = value + + for key, value in list(kwargs.items()): + if key == "prompt" and isinstance(value, str): + attributes.setdefault(SPANDATA.GEN_AI_REQUEST_MESSAGES, []).append( + {"role": "user", "content": value} + ) + continue + + if key == "system_prompt" and isinstance(value, str): + attributes.setdefault(SPANDATA.GEN_AI_REQUEST_MESSAGES, []).append( + {"role": "system", "content": value} + ) + continue + + _set_from_key(key, value) + + if template == SPANTEMPLATE.AI_TOOL and send_pii: + attributes[SPANDATA.GEN_AI_TOOL_INPUT] = safe_repr( + {"args": args, "kwargs": kwargs} + ) + + # Coerce to string + if SPANDATA.GEN_AI_REQUEST_MESSAGES in attributes: + attributes[SPANDATA.GEN_AI_REQUEST_MESSAGES] = safe_repr( + attributes[SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) + + return attributes + + +def _get_usage_attributes(usage): + # type: (Any) -> dict[str, Any] + """ + Get usage attributes. 
+ """ + attributes = {} + + def _set_from_keys(attribute, keys): + # type: (str, tuple[str, ...]) -> None + for key in keys: + value = _get_value(usage, key) + if value is not None and isinstance(value, int): + attributes[attribute] = value + + _set_from_keys( + SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, + ("prompt_tokens", "input_tokens"), + ) + _set_from_keys( + SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, + ("completion_tokens", "output_tokens"), + ) + _set_from_keys( + SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, + ("total_tokens",), + ) + + return attributes + + +def _get_output_attributes(template, send_pii, result): + # type: (Union[str, SPANTEMPLATE], bool, Any) -> dict[str, Any] + """ + Get output attributes for the given span template. + """ + attributes = {} # type: dict[str, Any] + + if template in [SPANTEMPLATE.AI_AGENT, SPANTEMPLATE.AI_TOOL, SPANTEMPLATE.AI_CHAT]: + with capture_internal_exceptions(): + # Usage from result, result.usage, and result.metadata.usage + usage_candidates = [result] + + usage = _get_value(result, "usage") + usage_candidates.append(usage) + + meta = _get_value(result, "metadata") + usage = _get_value(meta, "usage") + usage_candidates.append(usage) + + for usage_candidate in usage_candidates: + if usage_candidate is not None: + attributes.update(_get_usage_attributes(usage_candidate)) + + # Response model + model_name = _get_value(result, "model") + if model_name is not None and isinstance(model_name, str): + attributes[SPANDATA.GEN_AI_RESPONSE_MODEL] = model_name + + model_name = _get_value(result, "model_name") + if model_name is not None and isinstance(model_name, str): + attributes[SPANDATA.GEN_AI_RESPONSE_MODEL] = model_name + + # Tool output + if template == SPANTEMPLATE.AI_TOOL and send_pii: + attributes[SPANDATA.GEN_AI_TOOL_OUTPUT] = safe_repr(result) + + return attributes + + +def _set_input_attributes(span, template, send_pii, name, f, args, kwargs): + # type: (Span, Union[str, SPANTEMPLATE], bool, str, Any, tuple[Any, ...], dict[str, Any]) -> None + """ + Set span input attributes based on the given span template. + + :param span: The span to set attributes on. + :param template: The template to use to set attributes on the span. + :param send_pii: Whether to send PII data. + :param f: The wrapped function. + :param args: The arguments to the wrapped function. + :param kwargs: The keyword arguments to the wrapped function. + """ + attributes = {} # type: dict[str, Any] + + if template == SPANTEMPLATE.AI_AGENT: + attributes = { + SPANDATA.GEN_AI_OPERATION_NAME: "invoke_agent", + SPANDATA.GEN_AI_AGENT_NAME: name, + } + elif template == SPANTEMPLATE.AI_CHAT: + attributes = { + SPANDATA.GEN_AI_OPERATION_NAME: "chat", + } + elif template == SPANTEMPLATE.AI_TOOL: + attributes = { + SPANDATA.GEN_AI_OPERATION_NAME: "execute_tool", + SPANDATA.GEN_AI_TOOL_NAME: name, + } + + docstring = f.__doc__ + if docstring is not None: + attributes[SPANDATA.GEN_AI_TOOL_DESCRIPTION] = docstring + + attributes.update(_get_input_attributes(template, send_pii, args, kwargs)) + span.update_data(attributes or {}) + + +def _set_output_attributes(span, template, send_pii, result): + # type: (Span, Union[str, SPANTEMPLATE], bool, Any) -> None + """ + Set span output attributes based on the given span template. + + :param span: The span to set attributes on. + :param template: The template to use to set attributes on the span. + :param send_pii: Whether to send PII data. + :param result: The result of the wrapped function. 
+ """ + span.update_data(_get_output_attributes(template, send_pii, result) or {}) + + # Circular imports from sentry_sdk.tracing import ( BAGGAGE_HEADER_NAME, diff --git a/sentry_sdk/utils.py b/sentry_sdk/utils.py index 9c6f2cfc3b..b0f3fa4a4c 100644 --- a/sentry_sdk/utils.py +++ b/sentry_sdk/utils.py @@ -59,7 +59,7 @@ from gevent.hub import Hub - from sentry_sdk._types import Event, ExcInfo + from sentry_sdk._types import Event, ExcInfo, Log, Hint P = ParamSpec("P") R = TypeVar("R") @@ -1984,3 +1984,24 @@ def serialize_item(item): return json.dumps(serialized, default=str) except Exception: return str(data) + + +def has_logs_enabled(options): + # type: (Optional[dict[str, Any]]) -> bool + if options is None: + return False + + return bool( + options.get("enable_logs", False) + or options["_experiments"].get("enable_logs", False) + ) + + +def get_before_send_log(options): + # type: (Optional[dict[str, Any]]) -> Optional[Callable[[Log, Hint], Optional[Log]]] + if options is None: + return None + + return options.get("before_send_log") or options["_experiments"].get( + "before_send_log" + ) diff --git a/setup.py b/setup.py index 11b02cbca8..dd91f8bb37 100644 --- a/setup.py +++ b/setup.py @@ -21,7 +21,7 @@ def get_file_text(file_name): setup( name="sentry-sdk", - version="2.34.1", + version="2.35.0", author="Sentry Team and Contributors", author_email="hello@sentry.io", url="https://github.com/getsentry/sentry-python", diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index e6e1a40aa9..eba07a1df6 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -20,7 +20,7 @@ async def __call__(self, *args, **kwargs): from anthropic.types.message_delta_event import MessageDeltaEvent from anthropic.types.message_start_event import MessageStartEvent -from sentry_sdk.integrations.anthropic import _add_ai_data_to_span, _collect_ai_data +from sentry_sdk.integrations.anthropic import _set_output_data, _collect_ai_data from sentry_sdk.utils import package_version try: @@ -112,23 +112,27 @@ def test_nonstreaming_create_message( assert len(event["spans"]) == 1 (span,) = event["spans"] - assert span["op"] == OP.ANTHROPIC_MESSAGES_CREATE - assert span["description"] == "Anthropic messages create" - assert span["data"][SPANDATA.AI_MODEL_ID] == "model" + assert span["op"] == OP.GEN_AI_CHAT + assert span["description"] == "chat model" + assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages - assert span["data"][SPANDATA.AI_RESPONSES] == [ - {"type": "text", "text": "Hi, I'm Claude."} - ] + assert ( + span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + == '[{"role": "user", "content": "Hello, Claude"}]' + ) + assert ( + span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + == '[{"text": "Hi, I\'m Claude.", "type": "text"}]' + ) else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"]["gen_ai.usage.input_tokens"] == 10 - assert span["data"]["gen_ai.usage.output_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 - assert span["data"][SPANDATA.AI_STREAMING] is False + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 
20 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False @pytest.mark.asyncio @@ -180,23 +184,27 @@ async def test_nonstreaming_create_message_async( assert len(event["spans"]) == 1 (span,) = event["spans"] - assert span["op"] == OP.ANTHROPIC_MESSAGES_CREATE - assert span["description"] == "Anthropic messages create" - assert span["data"][SPANDATA.AI_MODEL_ID] == "model" + assert span["op"] == OP.GEN_AI_CHAT + assert span["description"] == "chat model" + assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages - assert span["data"][SPANDATA.AI_RESPONSES] == [ - {"type": "text", "text": "Hi, I'm Claude."} - ] + assert ( + span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + == '[{"role": "user", "content": "Hello, Claude"}]' + ) + assert ( + span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + == '[{"text": "Hi, I\'m Claude.", "type": "text"}]' + ) else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"]["gen_ai.usage.input_tokens"] == 10 - assert span["data"]["gen_ai.usage.output_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 - assert span["data"][SPANDATA.AI_STREAMING] is False + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False @pytest.mark.parametrize( @@ -279,24 +287,28 @@ def test_streaming_create_message( assert len(event["spans"]) == 1 (span,) = event["spans"] - assert span["op"] == OP.ANTHROPIC_MESSAGES_CREATE - assert span["description"] == "Anthropic messages create" - assert span["data"][SPANDATA.AI_MODEL_ID] == "model" + assert span["op"] == OP.GEN_AI_CHAT + assert span["description"] == "chat model" + assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages - assert span["data"][SPANDATA.AI_RESPONSES] == [ - {"type": "text", "text": "Hi! I'm Claude!"} - ] + assert ( + span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + == '[{"role": "user", "content": "Hello, Claude"}]' + ) + assert ( + span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + == '[{"text": "Hi! 
I\'m Claude!", "type": "text"}]' + ) else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"]["gen_ai.usage.input_tokens"] == 10 - assert span["data"]["gen_ai.usage.output_tokens"] == 30 - assert span["data"]["gen_ai.usage.total_tokens"] == 40 - assert span["data"][SPANDATA.AI_STREAMING] is True + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 30 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 40 + assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -382,24 +394,28 @@ async def test_streaming_create_message_async( assert len(event["spans"]) == 1 (span,) = event["spans"] - assert span["op"] == OP.ANTHROPIC_MESSAGES_CREATE - assert span["description"] == "Anthropic messages create" - assert span["data"][SPANDATA.AI_MODEL_ID] == "model" + assert span["op"] == OP.GEN_AI_CHAT + assert span["description"] == "chat model" + assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages - assert span["data"][SPANDATA.AI_RESPONSES] == [ - {"type": "text", "text": "Hi! I'm Claude!"} - ] + assert ( + span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + == '[{"role": "user", "content": "Hello, Claude"}]' + ) + assert ( + span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + == '[{"text": "Hi! I\'m Claude!", "type": "text"}]' + ) else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"]["gen_ai.usage.input_tokens"] == 10 - assert span["data"]["gen_ai.usage.output_tokens"] == 30 - assert span["data"]["gen_ai.usage.total_tokens"] == 40 - assert span["data"][SPANDATA.AI_STREAMING] is True + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 30 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 40 + assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.skipif( @@ -512,23 +528,27 @@ def test_streaming_create_message_with_input_json_delta( assert len(event["spans"]) == 1 (span,) = event["spans"] - assert span["op"] == OP.ANTHROPIC_MESSAGES_CREATE - assert span["description"] == "Anthropic messages create" - assert span["data"][SPANDATA.AI_MODEL_ID] == "model" + assert span["op"] == OP.GEN_AI_CHAT + assert span["description"] == "chat model" + assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages - assert span["data"][SPANDATA.AI_RESPONSES] == [ - {"text": "{'location': 'San Francisco, CA'}", "type": "text"} - ] + assert ( + span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' + ) + assert ( + span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + == '[{"text": "{\'location\': \'San Francisco, CA\'}", "type": "text"}]' + ) else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert 
SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"]["gen_ai.usage.input_tokens"] == 366 - assert span["data"]["gen_ai.usage.output_tokens"] == 51 - assert span["data"]["gen_ai.usage.total_tokens"] == 417 - assert span["data"][SPANDATA.AI_STREAMING] is True + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 51 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 417 + assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -648,24 +668,28 @@ async def test_streaming_create_message_with_input_json_delta_async( assert len(event["spans"]) == 1 (span,) = event["spans"] - assert span["op"] == OP.ANTHROPIC_MESSAGES_CREATE - assert span["description"] == "Anthropic messages create" - assert span["data"][SPANDATA.AI_MODEL_ID] == "model" + assert span["op"] == OP.GEN_AI_CHAT + assert span["description"] == "chat model" + assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.AI_INPUT_MESSAGES] == messages - assert span["data"][SPANDATA.AI_RESPONSES] == [ - {"text": "{'location': 'San Francisco, CA'}", "type": "text"} - ] + assert ( + span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' + ) + assert ( + span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + == '[{"text": "{\'location\': \'San Francisco, CA\'}", "type": "text"}]' + ) else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"]["gen_ai.usage.input_tokens"] == 366 - assert span["data"]["gen_ai.usage.output_tokens"] == 51 - assert span["data"]["gen_ai.usage.total_tokens"] == 417 - assert span["data"][SPANDATA.AI_STREAMING] is True + assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 51 + assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 417 + assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True def test_exception_message_create(sentry_init, capture_events): @@ -770,15 +794,16 @@ def test_collect_ai_data_with_input_json_delta(): index=0, type="content_block_delta", ) - + model = None input_tokens = 10 output_tokens = 20 content_blocks = [] - new_input_tokens, new_output_tokens, new_content_blocks = _collect_ai_data( - event, input_tokens, output_tokens, content_blocks + model, new_input_tokens, new_output_tokens, new_content_blocks = _collect_ai_data( + event, model, input_tokens, output_tokens, content_blocks ) + assert model is None assert new_input_tokens == input_tokens assert new_output_tokens == output_tokens assert new_content_blocks == ["test"] @@ -788,7 +813,7 @@ def test_collect_ai_data_with_input_json_delta(): ANTHROPIC_VERSION < (0, 27), reason="Versions <0.27.0 do not include InputJSONDelta.", ) -def test_add_ai_data_to_span_with_input_json_delta(sentry_init): +def test_set_output_data_with_input_json_delta(sentry_init): sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, @@ -798,19 +823,20 @@ def test_add_ai_data_to_span_with_input_json_delta(sentry_init): with start_transaction(name="test"): span = start_span() integration = AnthropicIntegration() - - _add_ai_data_to_span( + json_deltas = ["{'test': 'data',", "'more': 
'json'}"] + _set_output_data( span, integration, + model="", input_tokens=10, output_tokens=20, - content_blocks=["{'test': 'data',", "'more': 'json'}"], + content_blocks=[{"text": "".join(json_deltas), "type": "text"}], ) - assert span._data.get("ai.responses") == [ - {"type": "text", "text": "{'test': 'data','more': 'json'}"} - ] - assert span._data.get("ai.streaming") is True - assert span._data.get("gen_ai.usage.input_tokens") == 10 - assert span._data.get("gen_ai.usage.output_tokens") == 20 - assert span._data.get("gen_ai.usage.total_tokens") == 30 + assert ( + span._data.get(SPANDATA.GEN_AI_RESPONSE_TEXT) + == "[{\"text\": \"{'test': 'data','more': 'json'}\", \"type\": \"text\"}]" + ) + assert span._data.get(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS) == 10 + assert span._data.get(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS) == 20 + assert span._data.get(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS) == 30 diff --git a/tests/integrations/clickhouse_driver/test_clickhouse_driver.py b/tests/integrations/clickhouse_driver/test_clickhouse_driver.py index 0675ad9ff5..635f9334c4 100644 --- a/tests/integrations/clickhouse_driver/test_clickhouse_driver.py +++ b/tests/integrations/clickhouse_driver/test_clickhouse_driver.py @@ -342,6 +342,38 @@ def test_clickhouse_client_spans( assert event["spans"] == expected_spans +def test_clickhouse_spans_with_generator(sentry_init, capture_events): + sentry_init( + integrations=[ClickhouseDriverIntegration()], + send_default_pii=True, + traces_sample_rate=1.0, + ) + events = capture_events() + + # Use a generator to test that the integration obtains values from the generator, + # without consuming the generator. + values = ({"x": i} for i in range(3)) + + with start_transaction(name="test_clickhouse_transaction"): + client = Client("localhost") + client.execute("DROP TABLE IF EXISTS test") + client.execute("CREATE TABLE test (x Int32) ENGINE = Memory") + client.execute("INSERT INTO test (x) VALUES", values) + res = client.execute("SELECT x FROM test") + + # Verify that the integration did not consume the generator + assert res == [(0,), (1,), (2,)] + + (event,) = events + spans = event["spans"] + + [span] = [ + span for span in spans if span["description"] == "INSERT INTO test (x) VALUES" + ] + + assert span["data"]["db.params"] == [{"x": 0}, {"x": 1}, {"x": 2}] + + def test_clickhouse_client_spans_with_pii( sentry_init, capture_events, capture_envelopes ) -> None: diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index 9d55a49f82..9a06ac05d4 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -54,15 +54,7 @@ def _llm_type(self) -> str: return llm_type -def tiktoken_encoding_if_installed(): - try: - import tiktoken # type: ignore # noqa # pylint: disable=unused-import - - return "cl100k_base" - except ImportError: - return None - - +@pytest.mark.xfail @pytest.mark.parametrize( "send_default_pii, include_prompts, use_unknown_llm_type", [ @@ -82,7 +74,6 @@ def test_langchain_agent( integrations=[ LangchainIntegration( include_prompts=include_prompts, - tiktoken_encoding_name=tiktoken_encoding_if_installed(), ) ], traces_sample_rate=1.0, @@ -144,7 +135,16 @@ def test_langchain_agent( ), ChatGenerationChunk( type="ChatGenerationChunk", - message=AIMessageChunk(content="5"), + message=AIMessageChunk( + content="5", + usage_metadata={ + "input_tokens": 142, + "output_tokens": 50, + "total_tokens": 192, + "input_token_details": {"audio": 0, "cache_read": 0}, + 
"output_token_details": {"audio": 0, "reasoning": 0}, + }, + ), generation_info={"finish_reason": "function_call"}, ), ], @@ -152,7 +152,16 @@ def test_langchain_agent( ChatGenerationChunk( text="The word eudca has 5 letters.", type="ChatGenerationChunk", - message=AIMessageChunk(content="The word eudca has 5 letters."), + message=AIMessageChunk( + content="The word eudca has 5 letters.", + usage_metadata={ + "input_tokens": 89, + "output_tokens": 28, + "total_tokens": 117, + "input_token_details": {"audio": 0, "cache_read": 0}, + "output_token_details": {"audio": 0, "reasoning": 0}, + }, + ), ), ChatGenerationChunk( type="ChatGenerationChunk", @@ -176,42 +185,49 @@ def test_langchain_agent( tx = events[0] assert tx["type"] == "transaction" - chat_spans = list( - x for x in tx["spans"] if x["op"] == "ai.chat_completions.create.langchain" - ) - tool_exec_span = next(x for x in tx["spans"] if x["op"] == "ai.tool.langchain") + chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat") + tool_exec_span = next(x for x in tx["spans"] if x["op"] == "gen_ai.execute_tool") assert len(chat_spans) == 2 # We can't guarantee anything about the "shape" of the langchain execution graph - assert len(list(x for x in tx["spans"] if x["op"] == "ai.run.langchain")) > 0 + assert len(list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")) > 0 - if use_unknown_llm_type: - assert "gen_ai.usage.input_tokens" in chat_spans[0]["data"] - assert "gen_ai.usage.total_tokens" in chat_spans[0]["data"] - else: - # important: to avoid double counting, we do *not* measure - # tokens used if we have an explicit integration (e.g. OpenAI) - assert "measurements" not in chat_spans[0] + assert "gen_ai.usage.input_tokens" in chat_spans[0]["data"] + assert "gen_ai.usage.output_tokens" in chat_spans[0]["data"] + assert "gen_ai.usage.total_tokens" in chat_spans[0]["data"] + + assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 142 + assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 50 + assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 192 + + assert "gen_ai.usage.input_tokens" in chat_spans[1]["data"] + assert "gen_ai.usage.output_tokens" in chat_spans[1]["data"] + assert "gen_ai.usage.total_tokens" in chat_spans[1]["data"] + assert chat_spans[1]["data"]["gen_ai.usage.input_tokens"] == 89 + assert chat_spans[1]["data"]["gen_ai.usage.output_tokens"] == 28 + assert chat_spans[1]["data"]["gen_ai.usage.total_tokens"] == 117 if send_default_pii and include_prompts: assert ( - "You are very powerful" in chat_spans[0]["data"][SPANDATA.AI_INPUT_MESSAGES] + "You are very powerful" + in chat_spans[0]["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] ) - assert "5" in chat_spans[0]["data"][SPANDATA.AI_RESPONSES] - assert "word" in tool_exec_span["data"][SPANDATA.AI_INPUT_MESSAGES] - assert 5 == int(tool_exec_span["data"][SPANDATA.AI_RESPONSES]) + assert "5" in chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "word" in tool_exec_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert 5 == int(tool_exec_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]) assert ( - "You are very powerful" in chat_spans[1]["data"][SPANDATA.AI_INPUT_MESSAGES] + "You are very powerful" + in chat_spans[1]["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] ) - assert "5" in chat_spans[1]["data"][SPANDATA.AI_RESPONSES] + assert "5" in chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] else: - assert SPANDATA.AI_INPUT_MESSAGES not in chat_spans[0].get("data", {}) - assert SPANDATA.AI_RESPONSES not in chat_spans[0].get("data", {}) - 
assert SPANDATA.AI_INPUT_MESSAGES not in chat_spans[1].get("data", {}) - assert SPANDATA.AI_RESPONSES not in chat_spans[1].get("data", {}) - assert SPANDATA.AI_INPUT_MESSAGES not in tool_exec_span.get("data", {}) - assert SPANDATA.AI_RESPONSES not in tool_exec_span.get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get("data", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in tool_exec_span.get("data", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in tool_exec_span.get("data", {}) def test_langchain_error(sentry_init, capture_events): @@ -311,7 +327,16 @@ def test_span_origin(sentry_init, capture_events): ), ChatGenerationChunk( type="ChatGenerationChunk", - message=AIMessageChunk(content="5"), + message=AIMessageChunk( + content="5", + usage_metadata={ + "input_tokens": 142, + "output_tokens": 50, + "total_tokens": 192, + "input_token_details": {"audio": 0, "cache_read": 0}, + "output_token_details": {"audio": 0, "reasoning": 0}, + }, + ), generation_info={"finish_reason": "function_call"}, ), ], @@ -319,7 +344,16 @@ def test_span_origin(sentry_init, capture_events): ChatGenerationChunk( text="The word eudca has 5 letters.", type="ChatGenerationChunk", - message=AIMessageChunk(content="The word eudca has 5 letters."), + message=AIMessageChunk( + content="The word eudca has 5 letters.", + usage_metadata={ + "input_tokens": 89, + "output_tokens": 28, + "total_tokens": 117, + "input_token_details": {"audio": 0, "cache_read": 0}, + "output_token_details": {"audio": 0, "reasoning": 0}, + }, + ), ), ChatGenerationChunk( type="ChatGenerationChunk", diff --git a/tests/integrations/logging/test_logging.py b/tests/integrations/logging/test_logging.py index 6ef4ae371b..7ecdf42500 100644 --- a/tests/integrations/logging/test_logging.py +++ b/tests/integrations/logging/test_logging.py @@ -304,7 +304,7 @@ def test_sentry_logs_warning(sentry_init, capture_envelopes): """ The python logger module should create 'warn' sentry logs if the flag is on. """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() python_logger = logging.Logger("test-logger") @@ -329,7 +329,7 @@ def test_sentry_logs_debug(sentry_init, capture_envelopes): """ The python logger module should not create 'debug' sentry logs if the flag is on by default """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() python_logger = logging.Logger("test-logger") @@ -344,7 +344,7 @@ def test_no_log_infinite_loop(sentry_init, capture_envelopes): If 'debug' mode is true, and you set a low log level in the logging integration, there should be no infinite loops. 
""" sentry_init( - _experiments={"enable_logs": True}, + enable_logs=True, integrations=[LoggingIntegration(sentry_logs_level=logging.DEBUG)], debug=True, ) @@ -361,7 +361,7 @@ def test_logging_errors(sentry_init, capture_envelopes): """ The python logger module should be able to log errors without erroring """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() python_logger = logging.Logger("test-logger") @@ -396,7 +396,7 @@ def test_log_strips_project_root(sentry_init, capture_envelopes): The python logger should strip project roots from the log record path """ sentry_init( - _experiments={"enable_logs": True}, + enable_logs=True, project_root="/custom/test", ) envelopes = capture_envelopes() @@ -425,7 +425,7 @@ def test_logger_with_all_attributes(sentry_init, capture_envelopes): """ The python logger should be able to log all attributes, including extra data. """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() python_logger = logging.Logger("test-logger") @@ -498,7 +498,7 @@ def test_sentry_logs_named_parameters(sentry_init, capture_envelopes): """ The python logger module should capture named parameters from dictionary arguments in Sentry logs. """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() python_logger = logging.Logger("test-logger") @@ -543,7 +543,7 @@ def test_sentry_logs_named_parameters_complex_values(sentry_init, capture_envelo """ The python logger module should handle complex values in named parameters using safe_repr. """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() python_logger = logging.Logger("test-logger") diff --git a/tests/integrations/loguru/test_loguru.py b/tests/integrations/loguru/test_loguru.py index c120d1d7e2..38093d24cb 100644 --- a/tests/integrations/loguru/test_loguru.py +++ b/tests/integrations/loguru/test_loguru.py @@ -141,7 +141,7 @@ def test_sentry_logs_warning( uninstall_integration("loguru") request.addfinalizer(logger.remove) - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() logger.warning("this is {} a {}", "just", "template") @@ -165,7 +165,7 @@ def test_sentry_logs_debug( uninstall_integration("loguru") request.addfinalizer(logger.remove) - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() logger.debug("this is %s a template %s", "1", "2") @@ -182,7 +182,7 @@ def test_sentry_log_levels( sentry_init( integrations=[LoguruIntegration(sentry_logs_level=LoggingLevels.SUCCESS)], - _experiments={"enable_logs": True}, + enable_logs=True, ) envelopes = capture_envelopes() @@ -216,7 +216,7 @@ def test_disable_loguru_logs( sentry_init( integrations=[LoguruIntegration(sentry_logs_level=None)], - _experiments={"enable_logs": True}, + enable_logs=True, ) envelopes = capture_envelopes() @@ -267,7 +267,7 @@ def test_no_log_infinite_loop( request.addfinalizer(logger.remove) sentry_init( - _experiments={"enable_logs": True}, + enable_logs=True, integrations=[LoguruIntegration(sentry_logs_level=LoggingLevels.DEBUG)], debug=True, ) @@ -284,7 +284,7 @@ def test_logging_errors(sentry_init, capture_envelopes, uninstall_integration, r uninstall_integration("loguru") request.addfinalizer(logger.remove) - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) 
envelopes = capture_envelopes() logger.error(Exception("test exc 1")) @@ -313,7 +313,7 @@ def test_log_strips_project_root( request.addfinalizer(logger.remove) sentry_init( - _experiments={"enable_logs": True}, + enable_logs=True, project_root="/custom/test", ) envelopes = capture_envelopes() @@ -362,7 +362,7 @@ def test_log_keeps_full_path_if_not_in_project_root( request.addfinalizer(logger.remove) sentry_init( - _experiments={"enable_logs": True}, + enable_logs=True, project_root="/custom/test", ) envelopes = capture_envelopes() @@ -410,7 +410,7 @@ def test_logger_with_all_attributes( uninstall_integration("loguru") request.addfinalizer(logger.remove) - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() logger.warning("log #{}", 1) diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index 5767f84d04..a3c7bdd9d9 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -1,4 +1,12 @@ import pytest + +from sentry_sdk.utils import package_version + +try: + from openai import NOT_GIVEN +except ImportError: + NOT_GIVEN = None + from openai import AsyncOpenAI, OpenAI, AsyncStream, Stream, OpenAIError from openai.types import CompletionUsage, CreateEmbeddingResponse, Embedding from openai.types.chat import ChatCompletion, ChatCompletionMessage, ChatCompletionChunk @@ -43,6 +51,7 @@ async def __call__(self, *args, **kwargs): return super(AsyncMock, self).__call__(*args, **kwargs) +OPENAI_VERSION = package_version("openai") EXAMPLE_CHAT_COMPLETION = ChatCompletion( id="chat-id", choices=[ @@ -1387,3 +1396,34 @@ async def test_streaming_responses_api_async( assert span["data"]["gen_ai.usage.input_tokens"] == 20 assert span["data"]["gen_ai.usage.output_tokens"] == 10 assert span["data"]["gen_ai.usage.total_tokens"] == 30 + + +@pytest.mark.skipif( + OPENAI_VERSION <= (1, 1, 0), + reason="OpenAI versions <=1.1.0 do not support the tools parameter.", +) +@pytest.mark.parametrize( + "tools", + [[], None, NOT_GIVEN], +) +def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): + sentry_init( + integrations=[OpenAIIntegration()], + traces_sample_rate=1.0, + ) + events = capture_events() + + client = OpenAI(api_key="z") + client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) + + with start_transaction(name="openai tx"): + client.chat.completions.create( + model="some-model", + messages=[{"role": "system", "content": "hello"}], + tools=tools, + ) + + (event,) = events + span = event["spans"][0] + + assert "gen_ai.request.available_tools" not in span["data"] diff --git a/tests/integrations/pymongo/test_pymongo.py b/tests/integrations/pymongo/test_pymongo.py index 10f1c9fba9..7e6556f85a 100644 --- a/tests/integrations/pymongo/test_pymongo.py +++ b/tests/integrations/pymongo/test_pymongo.py @@ -10,7 +10,7 @@ @pytest.fixture(scope="session") def mongo_server(): server = MockupDB(verbose=True) - server.autoresponds("ismaster", maxWireVersion=7) + server.autoresponds("ismaster", maxWireVersion=8) server.run() server.autoresponds( {"find": "test_collection"}, cursor={"id": 123, "firstBatch": []} diff --git a/tests/integrations/test_gnu_backtrace.py b/tests/integrations/test_gnu_backtrace.py index 63930f850d..be7346a2c3 100644 --- a/tests/integrations/test_gnu_backtrace.py +++ b/tests/integrations/test_gnu_backtrace.py @@ -31,8 +31,38 @@ 24. ? 
@ 0x00000000000d162c in /usr/lib/aarch64-linux-gnu/libc-2.31.so """ +LINES_NO_PATH = r""" +0. DB::Exception::Exception(DB::Exception::MessageMasked&&, int, bool) @ 0x000000000bfc38a4 +1. DB::Exception::Exception(int, FormatStringHelperImpl::type, std::type_identity::type>, String&&, String&&) @ 0x00000000075d242c +2. DB::ActionsMatcher::visit(DB::ASTIdentifier const&, std::shared_ptr const&, DB::ActionsMatcher::Data&) @ 0x0000000010b1c648 +3. DB::ActionsMatcher::visit(DB::ASTFunction const&, std::shared_ptr const&, DB::ActionsMatcher::Data&) @ 0x0000000010b1f58c +4. DB::ActionsMatcher::visit(DB::ASTFunction const&, std::shared_ptr const&, DB::ActionsMatcher::Data&) @ 0x0000000010b1f58c +5. DB::ActionsMatcher::visit(std::shared_ptr const&, DB::ActionsMatcher::Data&) @ 0x0000000010b1c394 +6. DB::InDepthNodeVisitor const>::doVisit(std::shared_ptr const&) @ 0x0000000010b154a0 +7. DB::ExpressionAnalyzer::getRootActions(std::shared_ptr const&, bool, std::shared_ptr&, bool) @ 0x0000000010af83b4 +8. DB::SelectQueryExpressionAnalyzer::appendSelect(DB::ExpressionActionsChain&, bool) @ 0x0000000010aff168 +9. DB::ExpressionAnalysisResult::ExpressionAnalysisResult(DB::SelectQueryExpressionAnalyzer&, std::shared_ptr const&, bool, bool, bool, std::shared_ptr const&, std::shared_ptr const&, DB::Block const&) @ 0x0000000010b05b74 +10. DB::InterpreterSelectQuery::getSampleBlockImpl() @ 0x00000000111559fc +11. DB::InterpreterSelectQuery::InterpreterSelectQuery(std::shared_ptr const&, std::shared_ptr const&, std::optional, std::shared_ptr const&, DB::SelectQueryOptions const&, std::vector> const&, std::shared_ptr const&, std::shared_ptr)::$_0::operator()(bool) const @ 0x0000000011148254 +12. DB::InterpreterSelectQuery::InterpreterSelectQuery(std::shared_ptr const&, std::shared_ptr const&, std::optional, std::shared_ptr const&, DB::SelectQueryOptions const&, std::vector> const&, std::shared_ptr const&, std::shared_ptr) @ 0x00000000111413e8 +13. DB::InterpreterSelectWithUnionQuery::InterpreterSelectWithUnionQuery(std::shared_ptr const&, std::shared_ptr, DB::SelectQueryOptions const&, std::vector> const&) @ 0x00000000111d3708 +14. DB::InterpreterFactory::get(std::shared_ptr&, std::shared_ptr, DB::SelectQueryOptions const&) @ 0x0000000011100b64 +15. DB::executeQueryImpl(char const*, char const*, std::shared_ptr, bool, DB::QueryProcessingStage::Enum, DB::ReadBuffer*) @ 0x00000000114c3f3c +16. DB::executeQuery(String const&, std::shared_ptr, bool, DB::QueryProcessingStage::Enum) @ 0x00000000114c0ec8 +17. DB::TCPHandler::runImpl() @ 0x00000000121bb5d8 +18. DB::TCPHandler::run() @ 0x00000000121cb728 +19. Poco::Net::TCPServerConnection::start() @ 0x00000000146d9404 +20. Poco::Net::TCPServerDispatcher::run() @ 0x00000000146da900 +21. Poco::PooledThread::run() @ 0x000000001484da7c +22. Poco::ThreadImpl::runnableEntry(void*) @ 0x000000001484bc24 +23. start_thread @ 0x0000000000007624 +24. ? 
@ 0x00000000000d162c +""" + -@pytest.mark.parametrize("input", LINES.strip().splitlines()) +@pytest.mark.parametrize( + "input", LINES.strip().splitlines() + LINES_NO_PATH.strip().splitlines() +) def test_basic(sentry_init, capture_events, input): sentry_init(integrations=[GnuBacktraceIntegration()]) events = capture_events() diff --git a/tests/test_logs.py b/tests/test_logs.py index a2f412dcb0..b2578d83d5 100644 --- a/tests/test_logs.py +++ b/tests/test_logs.py @@ -80,7 +80,7 @@ def test_logs_disabled_by_default(sentry_init, capture_envelopes): @minimum_python_37 def test_logs_basics(sentry_init, capture_envelopes): - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() sentry_sdk.logger.trace("This is a 'trace' log...") @@ -111,11 +111,29 @@ def test_logs_basics(sentry_init, capture_envelopes): assert logs[5].get("severity_number") == 21 +@minimum_python_37 +def test_logs_experimental_option_still_works(sentry_init, capture_envelopes): + sentry_init(_experiments={"enable_logs": True}) + envelopes = capture_envelopes() + + sentry_sdk.logger.error("This is an error log...") + + get_client().flush() + + logs = envelopes_to_logs(envelopes) + assert len(logs) == 1 + + assert logs[0].get("severity_text") == "error" + assert logs[0].get("severity_number") == 17 + + @minimum_python_37 def test_logs_before_send_log(sentry_init, capture_envelopes): - before_log_called = [False] + before_log_called = False def _before_log(record, hint): + nonlocal before_log_called + assert set(record.keys()) == { "severity_text", "severity_number", @@ -128,15 +146,13 @@ def _before_log(record, hint): if record["severity_text"] in ["fatal", "error"]: return None - before_log_called[0] = True + before_log_called = True return record sentry_init( - _experiments={ - "enable_logs": True, - "before_send_log": _before_log, - } + enable_logs=True, + before_send_log=_before_log, ) envelopes = capture_envelopes() @@ -155,7 +171,37 @@ def _before_log(record, hint): assert logs[1]["severity_text"] == "debug" assert logs[2]["severity_text"] == "info" assert logs[3]["severity_text"] == "warn" - assert before_log_called[0] + assert before_log_called is True + + +@minimum_python_37 +def test_logs_before_send_log_experimental_option_still_works( + sentry_init, capture_envelopes +): + before_log_called = False + + def _before_log(record, hint): + nonlocal before_log_called + before_log_called = True + + return record + + sentry_init( + enable_logs=True, + _experiments={ + "before_send_log": _before_log, + }, + ) + envelopes = capture_envelopes() + + sentry_sdk.logger.error("This is an error log...") + + get_client().flush() + logs = envelopes_to_logs(envelopes) + assert len(logs) == 1 + + assert logs[0]["severity_text"] == "error" + assert before_log_called is True @minimum_python_37 @@ -163,7 +209,7 @@ def test_logs_attributes(sentry_init, capture_envelopes): """ Passing arbitrary attributes to log messages. """ - sentry_init(_experiments={"enable_logs": True}, server_name="test-server") + sentry_init(enable_logs=True, server_name="test-server") envelopes = capture_envelopes() attrs = { @@ -196,7 +242,7 @@ def test_logs_message_params(sentry_init, capture_envelopes): """ This is the official way of how to pass vars to log messages. 
""" - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() sentry_sdk.logger.warning("The recorded value was '{int_var}'", int_var=1) @@ -239,7 +285,7 @@ def test_logs_tied_to_transactions(sentry_init, capture_envelopes): """ Log messages are also tied to transactions. """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() with sentry_sdk.start_transaction(name="test-transaction") as trx: @@ -255,7 +301,7 @@ def test_logs_tied_to_spans(sentry_init, capture_envelopes): """ Log messages are also tied to spans. """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() with sentry_sdk.start_transaction(name="test-transaction"): @@ -271,7 +317,7 @@ def test_auto_flush_logs_after_100(sentry_init, capture_envelopes): """ If you log >100 logs, it should automatically trigger a flush. """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() python_logger = logging.Logger("test-logger") @@ -288,7 +334,7 @@ def test_auto_flush_logs_after_100(sentry_init, capture_envelopes): def test_log_user_attributes(sentry_init, capture_envelopes): """User attributes are sent if enable_logs is True.""" - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) sentry_sdk.set_user({"id": "1", "email": "test@example.com", "username": "test"}) envelopes = capture_envelopes() @@ -314,7 +360,7 @@ def test_auto_flush_logs_after_5s(sentry_init, capture_envelopes): """ If you log a single log, it should automatically flush after 5 seconds, at most 10 seconds. """ - sentry_init(_experiments={"enable_logs": True}) + sentry_init(enable_logs=True) envelopes = capture_envelopes() python_logger = logging.Logger("test-logger") diff --git a/tests/tracing/test_decorator.py b/tests/tracing/test_decorator.py index 18a66bd43e..15432f5862 100644 --- a/tests/tracing/test_decorator.py +++ b/tests/tracing/test_decorator.py @@ -3,8 +3,10 @@ import pytest +import sentry_sdk +from sentry_sdk.consts import SPANTEMPLATE from sentry_sdk.tracing import trace -from sentry_sdk.tracing_utils import start_child_span_decorator +from sentry_sdk.tracing_utils import create_span_decorator from sentry_sdk.utils import logger from tests.conftest import patch_start_tracing_child @@ -24,6 +26,7 @@ def test_trace_decorator(): fake_start_child.assert_not_called() assert result == "return_of_sync_function" + start_child_span_decorator = create_span_decorator() result2 = start_child_span_decorator(my_example_function)() fake_start_child.assert_called_once_with( op="function", name="test_decorator.my_example_function" @@ -38,6 +41,7 @@ def test_trace_decorator_no_trx(): fake_debug.assert_not_called() assert result == "return_of_sync_function" + start_child_span_decorator = create_span_decorator() result2 = start_child_span_decorator(my_example_function)() fake_debug.assert_called_once_with( "Cannot create a child span for %s. 
" @@ -55,6 +59,7 @@ async def test_trace_decorator_async(): fake_start_child.assert_not_called() assert result == "return_of_async_function" + start_child_span_decorator = create_span_decorator() result2 = await start_child_span_decorator(my_async_example_function)() fake_start_child.assert_called_once_with( op="function", @@ -71,6 +76,7 @@ async def test_trace_decorator_async_no_trx(): fake_debug.assert_not_called() assert result == "return_of_async_function" + start_child_span_decorator = create_span_decorator() result2 = await start_child_span_decorator(my_async_example_function)() fake_debug.assert_called_once_with( "Cannot create a child span for %s. " @@ -113,3 +119,248 @@ async def _some_function_traced(a, b, c): assert inspect.getcallargs(_some_function, 1, 2, 3) == inspect.getcallargs( _some_function_traced, 1, 2, 3 ) + + +def test_span_templates_ai_dicts(sentry_init, capture_events): + sentry_init(traces_sample_rate=1.0) + events = capture_events() + + @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) + def my_tool(arg1, arg2): + return { + "output": "my_tool_result", + "usage": { + "prompt_tokens": 10, + "completion_tokens": 20, + "total_tokens": 30, + }, + } + + @sentry_sdk.trace(template=SPANTEMPLATE.AI_CHAT) + def my_chat(model=None, **kwargs): + return { + "content": "my_chat_result", + "usage": { + "input_tokens": 11, + "output_tokens": 22, + "total_tokens": 33, + }, + "model": f"{model}-v123", + } + + @sentry_sdk.trace(template=SPANTEMPLATE.AI_AGENT) + def my_agent(): + my_tool(1, 2) + my_chat( + model="my-gpt-4o-mini", + prompt="What is the weather in Tokyo?", + system_prompt="You are a helpful assistant that can answer questions about the weather.", + max_tokens=100, + temperature=0.5, + top_p=0.9, + top_k=40, + frequency_penalty=1.0, + presence_penalty=2.0, + ) + + with sentry_sdk.start_transaction(name="test-transaction"): + my_agent() + + (event,) = events + (agent_span, tool_span, chat_span) = event["spans"] + + assert agent_span["op"] == "gen_ai.invoke_agent" + assert ( + agent_span["description"] + == "invoke_agent test_decorator.test_span_templates_ai_dicts..my_agent" + ) + assert agent_span["data"] == { + "gen_ai.agent.name": "test_decorator.test_span_templates_ai_dicts..my_agent", + "gen_ai.operation.name": "invoke_agent", + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + assert tool_span["op"] == "gen_ai.execute_tool" + assert ( + tool_span["description"] + == "execute_tool test_decorator.test_span_templates_ai_dicts..my_tool" + ) + assert tool_span["data"] == { + "gen_ai.tool.name": "test_decorator.test_span_templates_ai_dicts..my_tool", + "gen_ai.operation.name": "execute_tool", + "gen_ai.usage.input_tokens": 10, + "gen_ai.usage.output_tokens": 20, + "gen_ai.usage.total_tokens": 30, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + assert "gen_ai.tool.description" not in tool_span["data"] + + assert chat_span["op"] == "gen_ai.chat" + assert chat_span["description"] == "chat my-gpt-4o-mini" + assert chat_span["data"] == { + "gen_ai.operation.name": "chat", + "gen_ai.request.frequency_penalty": 1.0, + "gen_ai.request.max_tokens": 100, + "gen_ai.request.messages": "[{'role': 'user', 'content': 'What is the weather in Tokyo?'}, {'role': 'system', 'content': 'You are a helpful assistant that can answer questions about the weather.'}]", + "gen_ai.request.model": "my-gpt-4o-mini", + "gen_ai.request.presence_penalty": 2.0, + "gen_ai.request.temperature": 0.5, + "gen_ai.request.top_k": 40, + "gen_ai.request.top_p": 0.9, + "gen_ai.response.model": 
"my-gpt-4o-mini-v123", + "gen_ai.usage.input_tokens": 11, + "gen_ai.usage.output_tokens": 22, + "gen_ai.usage.total_tokens": 33, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + +def test_span_templates_ai_objects(sentry_init, capture_events): + sentry_init(traces_sample_rate=1.0) + events = capture_events() + + @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) + def my_tool(arg1, arg2): + """This is a tool function.""" + mock_usage = mock.Mock() + mock_usage.prompt_tokens = 10 + mock_usage.completion_tokens = 20 + mock_usage.total_tokens = 30 + + mock_result = mock.Mock() + mock_result.output = "my_tool_result" + mock_result.usage = mock_usage + + return mock_result + + @sentry_sdk.trace(template=SPANTEMPLATE.AI_CHAT) + def my_chat(model=None, **kwargs): + mock_result = mock.Mock() + mock_result.content = "my_chat_result" + mock_result.usage = mock.Mock( + input_tokens=11, + output_tokens=22, + total_tokens=33, + ) + mock_result.model = f"{model}-v123" + + return mock_result + + @sentry_sdk.trace(template=SPANTEMPLATE.AI_AGENT) + def my_agent(): + my_tool(1, 2) + my_chat( + model="my-gpt-4o-mini", + prompt="What is the weather in Tokyo?", + system_prompt="You are a helpful assistant that can answer questions about the weather.", + max_tokens=100, + temperature=0.5, + top_p=0.9, + top_k=40, + frequency_penalty=1.0, + presence_penalty=2.0, + ) + + with sentry_sdk.start_transaction(name="test-transaction"): + my_agent() + + (event,) = events + (agent_span, tool_span, chat_span) = event["spans"] + + assert agent_span["op"] == "gen_ai.invoke_agent" + assert ( + agent_span["description"] + == "invoke_agent test_decorator.test_span_templates_ai_objects..my_agent" + ) + assert agent_span["data"] == { + "gen_ai.agent.name": "test_decorator.test_span_templates_ai_objects..my_agent", + "gen_ai.operation.name": "invoke_agent", + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + assert tool_span["op"] == "gen_ai.execute_tool" + assert ( + tool_span["description"] + == "execute_tool test_decorator.test_span_templates_ai_objects..my_tool" + ) + assert tool_span["data"] == { + "gen_ai.tool.name": "test_decorator.test_span_templates_ai_objects..my_tool", + "gen_ai.tool.description": "This is a tool function.", + "gen_ai.operation.name": "execute_tool", + "gen_ai.usage.input_tokens": 10, + "gen_ai.usage.output_tokens": 20, + "gen_ai.usage.total_tokens": 30, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + assert chat_span["op"] == "gen_ai.chat" + assert chat_span["description"] == "chat my-gpt-4o-mini" + assert chat_span["data"] == { + "gen_ai.operation.name": "chat", + "gen_ai.request.frequency_penalty": 1.0, + "gen_ai.request.max_tokens": 100, + "gen_ai.request.messages": "[{'role': 'user', 'content': 'What is the weather in Tokyo?'}, {'role': 'system', 'content': 'You are a helpful assistant that can answer questions about the weather.'}]", + "gen_ai.request.model": "my-gpt-4o-mini", + "gen_ai.request.presence_penalty": 2.0, + "gen_ai.request.temperature": 0.5, + "gen_ai.request.top_k": 40, + "gen_ai.request.top_p": 0.9, + "gen_ai.response.model": "my-gpt-4o-mini-v123", + "gen_ai.usage.input_tokens": 11, + "gen_ai.usage.output_tokens": 22, + "gen_ai.usage.total_tokens": 33, + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + +@pytest.mark.parametrize("send_default_pii", [True, False]) +def test_span_templates_ai_pii(sentry_init, capture_events, send_default_pii): + sentry_init(traces_sample_rate=1.0, send_default_pii=send_default_pii) + events = capture_events() + + 
@sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) + def my_tool(arg1, arg2, **kwargs): + """This is a tool function.""" + return "tool_output" + + @sentry_sdk.trace(template=SPANTEMPLATE.AI_CHAT) + def my_chat(model=None, **kwargs): + return "chat_output" + + @sentry_sdk.trace(template=SPANTEMPLATE.AI_AGENT) + def my_agent(*args, **kwargs): + my_tool(1, 2, tool_arg1="3", tool_arg2="4") + my_chat( + model="my-gpt-4o-mini", + prompt="What is the weather in Tokyo?", + system_prompt="You are a helpful assistant that can answer questions about the weather.", + max_tokens=100, + temperature=0.5, + top_p=0.9, + top_k=40, + frequency_penalty=1.0, + presence_penalty=2.0, + ) + return "agent_output" + + with sentry_sdk.start_transaction(name="test-transaction"): + my_agent(22, 33, arg1=44, arg2=55) + + (event,) = events + (_, tool_span, _) = event["spans"] + + if send_default_pii: + assert ( + tool_span["data"]["gen_ai.tool.input"] + == "{'args': (1, 2), 'kwargs': {'tool_arg1': '3', 'tool_arg2': '4'}}" + ) + assert tool_span["data"]["gen_ai.tool.output"] == "'tool_output'" + else: + assert "gen_ai.tool.input" not in tool_span["data"] + assert "gen_ai.tool.output" not in tool_span["data"] diff --git a/tests/tracing/test_misc.py b/tests/tracing/test_misc.py index b954d36e1a..e1de847102 100644 --- a/tests/tracing/test_misc.py +++ b/tests/tracing/test_misc.py @@ -509,3 +509,79 @@ def test_transaction_not_started_warning(sentry_init): "The transaction will not be sent to Sentry. To fix, start the transaction by" "passing it to sentry_sdk.start_transaction." ) + + +def test_span_set_data_update_data(sentry_init, capture_events): + sentry_init(traces_sample_rate=1.0) + + events = capture_events() + + with sentry_sdk.start_transaction(name="test-transaction"): + with start_span(op="test-span") as span: + span.set_data("key0", "value0") + span.set_data("key1", "value1") + + span.update_data( + { + "key1": "updated-value1", + "key2": "value2", + "key3": "value3", + } + ) + + (event,) = events + span = event["spans"][0] + + assert span["data"] == { + "key0": "value0", + "key1": "updated-value1", + "key2": "value2", + "key3": "value3", + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } + + +def test_update_current_span(sentry_init, capture_events): + sentry_init(traces_sample_rate=1.0) + + events = capture_events() + + with sentry_sdk.start_transaction(name="test-transaction"): + with start_span(op="test-span-op", name="test-span-name"): + sentry_sdk.update_current_span( + op="updated-span-op", + name="updated-span-name", + attributes={ + "key0": "value0", + "key1": "value1", + }, + ) + + sentry_sdk.update_current_span( + op="updated-span-op-2", + ) + + sentry_sdk.update_current_span( + name="updated-span-name-3", + ) + + sentry_sdk.update_current_span( + attributes={ + "key1": "updated-value-4", + "key2": "value2", + }, + ) + + (event,) = events + span = event["spans"][0] + + assert span["op"] == "updated-span-op-2" + assert span["description"] == "updated-span-name-3" + assert span["data"] == { + "key0": "value0", + "key1": "updated-value-4", + "key2": "value2", + "thread.id": mock.ANY, + "thread.name": mock.ANY, + } diff --git a/tox.ini b/tox.ini index 16067de8c7..a1b1327af5 100644 --- a/tox.ini +++ b/tox.ini @@ -10,7 +10,7 @@ # The file (and all resulting CI YAMLs) then need to be regenerated via # "scripts/generate-test-files.sh". 
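(Editorial note: the test_misc.py changes above also cover two span helpers, Span.update_data() and sentry_sdk.update_current_span(). A minimal sketch of how they compose, mirroring those assertions; passing name= to start_span and calling the helpers as top-level functions follows the tests and is an assumption outside of them.)

import sentry_sdk

sentry_sdk.init(traces_sample_rate=1.0)

with sentry_sdk.start_transaction(name="example-transaction"):
    with sentry_sdk.start_span(op="initial-op", name="initial-name") as span:
        span.set_data("key0", "value0")
        # update_data() merges a dict into the span's data, overwriting existing keys.
        span.update_data({"key1": "value1"})
        # update_current_span() re-labels whatever span is currently active and
        # merges the given attributes into its data.
        sentry_sdk.update_current_span(
            op="updated-op",
            name="updated-name",
            attributes={"key1": "updated-value1", "key2": "value2"},
        )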
# -# Last generated: 2025-07-29T06:07:22.069934+00:00 +# Last generated: 2025-08-12T07:16:34.585160+00:00 [tox] requires = @@ -138,21 +138,21 @@ envlist = {py3.8,py3.11,py3.12}-anthropic-v0.16.0 {py3.8,py3.11,py3.12}-anthropic-v0.31.2 {py3.8,py3.11,py3.12}-anthropic-v0.46.0 - {py3.8,py3.12,py3.13}-anthropic-v0.60.0 + {py3.8,py3.12,py3.13}-anthropic-v0.62.0 {py3.9,py3.10,py3.11}-cohere-v5.4.0 {py3.9,py3.11,py3.12}-cohere-v5.9.4 {py3.9,py3.11,py3.12}-cohere-v5.13.12 - {py3.9,py3.11,py3.12}-cohere-v5.16.1 + {py3.9,py3.11,py3.12}-cohere-v5.16.3 {py3.10,py3.11,py3.12}-openai_agents-v0.0.19 {py3.10,py3.12,py3.13}-openai_agents-v0.1.0 - {py3.10,py3.12,py3.13}-openai_agents-v0.2.3 + {py3.10,py3.12,py3.13}-openai_agents-v0.2.6 {py3.8,py3.10,py3.11}-huggingface_hub-v0.22.2 {py3.8,py3.11,py3.12}-huggingface_hub-v0.26.5 {py3.8,py3.12,py3.13}-huggingface_hub-v0.30.2 - {py3.8,py3.12,py3.13}-huggingface_hub-v0.34.2 + {py3.8,py3.12,py3.13}-huggingface_hub-v0.34.4 {py3.8,py3.12,py3.13}-huggingface_hub-v0.35.0rc0 @@ -162,7 +162,7 @@ envlist = {py3.6}-pymongo-v3.5.1 {py3.6,py3.10,py3.11}-pymongo-v3.13.0 {py3.6,py3.9,py3.10}-pymongo-v4.0.2 - {py3.9,py3.12,py3.13}-pymongo-v4.13.2 + {py3.9,py3.12,py3.13}-pymongo-v4.14.0 {py3.6}-redis_py_cluster_legacy-v1.3.6 {py3.6,py3.7}-redis_py_cluster_legacy-v2.0.0 @@ -170,7 +170,7 @@ envlist = {py3.6,py3.8,py3.9}-sqlalchemy-v1.3.24 {py3.6,py3.11,py3.12}-sqlalchemy-v1.4.54 - {py3.7,py3.12,py3.13}-sqlalchemy-v2.0.41 + {py3.7,py3.12,py3.13}-sqlalchemy-v2.0.43 # ~~~ Flags ~~~ @@ -180,7 +180,7 @@ envlist = {py3.9,py3.12,py3.13}-launchdarkly-v9.12.0 {py3.8,py3.12,py3.13}-openfeature-v0.7.5 - {py3.9,py3.12,py3.13}-openfeature-v0.8.1 + {py3.9,py3.12,py3.13}-openfeature-v0.8.2 {py3.7,py3.12,py3.13}-statsig-v0.55.3 {py3.7,py3.12,py3.13}-statsig-v0.57.3 @@ -209,7 +209,7 @@ envlist = {py3.8,py3.10,py3.11}-strawberry-v0.209.8 {py3.8,py3.11,py3.12}-strawberry-v0.232.2 {py3.8,py3.12,py3.13}-strawberry-v0.255.0 - {py3.9,py3.12,py3.13}-strawberry-v0.278.0 + {py3.9,py3.12,py3.13}-strawberry-v0.278.1 # ~~~ Network ~~~ @@ -245,7 +245,7 @@ envlist = {py3.6,py3.9,py3.10}-django-v3.2.25 {py3.8,py3.11,py3.12}-django-v4.2.23 {py3.10,py3.11,py3.12}-django-v5.0.14 - {py3.10,py3.12,py3.13}-django-v5.2.4 + {py3.10,py3.12,py3.13}-django-v5.2.5 {py3.6,py3.7,py3.8}-flask-v1.1.4 {py3.8,py3.12,py3.13}-flask-v2.3.3 @@ -276,12 +276,12 @@ envlist = {py3.6,py3.7}-falcon-v2.0.0 {py3.6,py3.11,py3.12}-falcon-v3.1.3 {py3.8,py3.11,py3.12}-falcon-v4.0.2 - {py3.8,py3.11,py3.12}-falcon-v4.1.0a3 + {py3.8,py3.11,py3.12}-falcon-v4.1.0 {py3.8,py3.10,py3.11}-litestar-v2.0.1 - {py3.8,py3.11,py3.12}-litestar-v2.5.5 - {py3.8,py3.11,py3.12}-litestar-v2.10.0 - {py3.8,py3.12,py3.13}-litestar-v2.16.0 + {py3.8,py3.11,py3.12}-litestar-v2.6.4 + {py3.8,py3.11,py3.12}-litestar-v2.12.1 + {py3.8,py3.12,py3.13}-litestar-v2.17.0 {py3.6}-pyramid-v1.8.6 {py3.6,py3.8,py3.9}-pyramid-v1.10.8 @@ -295,7 +295,7 @@ envlist = {py3.6,py3.7,py3.8}-tornado-v6.0.4 {py3.7,py3.9,py3.10}-tornado-v6.2 {py3.8,py3.10,py3.11}-tornado-v6.4.2 - {py3.9,py3.12,py3.13}-tornado-v6.5.1 + {py3.9,py3.12,py3.13}-tornado-v6.5.2 # ~~~ Misc ~~~ @@ -306,7 +306,7 @@ envlist = {py3.6,py3.7,py3.8}-trytond-v5.8.16 {py3.8,py3.10,py3.11}-trytond-v6.8.17 {py3.8,py3.11,py3.12}-trytond-v7.0.34 - {py3.9,py3.12,py3.13}-trytond-v7.6.4 + {py3.9,py3.12,py3.13}-trytond-v7.6.5 {py3.7,py3.12,py3.13}-typer-v0.15.4 {py3.7,py3.12,py3.13}-typer-v0.16.0 @@ -468,6 +468,7 @@ deps = {py3.7,py3.8,py3.9,py3.10,py3.11,py3.12,py3.13}-redis: pytest-asyncio redis-v3: redis~=3.0 redis-v4: redis~=4.0 + 
redis-v4: fakeredis<2.31.0 redis-v5: redis~=5.0 redis-latest: redis @@ -512,7 +513,7 @@ deps = anthropic-v0.16.0: anthropic==0.16.0 anthropic-v0.31.2: anthropic==0.31.2 anthropic-v0.46.0: anthropic==0.46.0 - anthropic-v0.60.0: anthropic==0.60.0 + anthropic-v0.62.0: anthropic==0.62.0 anthropic: pytest-asyncio anthropic-v0.16.0: httpx<0.28.0 anthropic-v0.31.2: httpx<0.28.0 @@ -521,17 +522,17 @@ deps = cohere-v5.4.0: cohere==5.4.0 cohere-v5.9.4: cohere==5.9.4 cohere-v5.13.12: cohere==5.13.12 - cohere-v5.16.1: cohere==5.16.1 + cohere-v5.16.3: cohere==5.16.3 openai_agents-v0.0.19: openai-agents==0.0.19 openai_agents-v0.1.0: openai-agents==0.1.0 - openai_agents-v0.2.3: openai-agents==0.2.3 + openai_agents-v0.2.6: openai-agents==0.2.6 openai_agents: pytest-asyncio huggingface_hub-v0.22.2: huggingface_hub==0.22.2 huggingface_hub-v0.26.5: huggingface_hub==0.26.5 huggingface_hub-v0.30.2: huggingface_hub==0.30.2 - huggingface_hub-v0.34.2: huggingface_hub==0.34.2 + huggingface_hub-v0.34.4: huggingface_hub==0.34.4 huggingface_hub-v0.35.0rc0: huggingface_hub==0.35.0rc0 @@ -541,7 +542,7 @@ deps = pymongo-v3.5.1: pymongo==3.5.1 pymongo-v3.13.0: pymongo==3.13.0 pymongo-v4.0.2: pymongo==4.0.2 - pymongo-v4.13.2: pymongo==4.13.2 + pymongo-v4.14.0: pymongo==4.14.0 pymongo: mockupdb redis_py_cluster_legacy-v1.3.6: redis-py-cluster==1.3.6 @@ -550,7 +551,7 @@ deps = sqlalchemy-v1.3.24: sqlalchemy==1.3.24 sqlalchemy-v1.4.54: sqlalchemy==1.4.54 - sqlalchemy-v2.0.41: sqlalchemy==2.0.41 + sqlalchemy-v2.0.43: sqlalchemy==2.0.43 # ~~~ Flags ~~~ @@ -560,7 +561,7 @@ deps = launchdarkly-v9.12.0: launchdarkly-server-sdk==9.12.0 openfeature-v0.7.5: openfeature-sdk==0.7.5 - openfeature-v0.8.1: openfeature-sdk==0.8.1 + openfeature-v0.8.2: openfeature-sdk==0.8.2 statsig-v0.55.3: statsig==0.55.3 statsig-v0.57.3: statsig==0.57.3 @@ -598,7 +599,7 @@ deps = strawberry-v0.209.8: strawberry-graphql[fastapi,flask]==0.209.8 strawberry-v0.232.2: strawberry-graphql[fastapi,flask]==0.232.2 strawberry-v0.255.0: strawberry-graphql[fastapi,flask]==0.255.0 - strawberry-v0.278.0: strawberry-graphql[fastapi,flask]==0.278.0 + strawberry-v0.278.1: strawberry-graphql[fastapi,flask]==0.278.1 strawberry: httpx strawberry-v0.209.8: pydantic<2.11 strawberry-v0.232.2: pydantic<2.11 @@ -645,7 +646,7 @@ deps = django-v3.2.25: django==3.2.25 django-v4.2.23: django==4.2.23 django-v5.0.14: django==5.0.14 - django-v5.2.4: django==5.2.4 + django-v5.2.5: django==5.2.5 django: psycopg2-binary django: djangorestframework django: pytest-django @@ -654,12 +655,12 @@ deps = django-v3.2.25: channels[daphne] django-v4.2.23: channels[daphne] django-v5.0.14: channels[daphne] - django-v5.2.4: channels[daphne] + django-v5.2.5: channels[daphne] django-v2.2.28: six django-v3.2.25: pytest-asyncio django-v4.2.23: pytest-asyncio django-v5.0.14: pytest-asyncio - django-v5.2.4: pytest-asyncio + django-v5.2.5: pytest-asyncio django-v1.11.29: djangorestframework>=3.0,<4.0 django-v1.11.29: Werkzeug<2.1.0 django-v2.2.28: djangorestframework>=3.0,<4.0 @@ -725,18 +726,18 @@ deps = falcon-v2.0.0: falcon==2.0.0 falcon-v3.1.3: falcon==3.1.3 falcon-v4.0.2: falcon==4.0.2 - falcon-v4.1.0a3: falcon==4.1.0a3 + falcon-v4.1.0: falcon==4.1.0 litestar-v2.0.1: litestar==2.0.1 - litestar-v2.5.5: litestar==2.5.5 - litestar-v2.10.0: litestar==2.10.0 - litestar-v2.16.0: litestar==2.16.0 + litestar-v2.6.4: litestar==2.6.4 + litestar-v2.12.1: litestar==2.12.1 + litestar-v2.17.0: litestar==2.17.0 litestar: pytest-asyncio litestar: python-multipart litestar: requests litestar: cryptography 
litestar-v2.0.1: httpx<0.28 - litestar-v2.5.5: httpx<0.28 + litestar-v2.6.4: httpx<0.28 pyramid-v1.8.6: pyramid==1.8.6 pyramid-v1.10.8: pyramid==1.10.8 @@ -757,7 +758,7 @@ deps = tornado-v6.0.4: tornado==6.0.4 tornado-v6.2: tornado==6.2 tornado-v6.4.2: tornado==6.4.2 - tornado-v6.5.1: tornado==6.5.1 + tornado-v6.5.2: tornado==6.5.2 tornado: pytest tornado-v6.0.4: pytest<8.2 tornado-v6.2: pytest<8.2 @@ -772,7 +773,7 @@ deps = trytond-v5.8.16: trytond==5.8.16 trytond-v6.8.17: trytond==6.8.17 trytond-v7.0.34: trytond==7.0.34 - trytond-v7.6.4: trytond==7.6.4 + trytond-v7.6.5: trytond==7.6.5 trytond: werkzeug trytond-v4.6.22: werkzeug<1.0 trytond-v4.8.18: werkzeug<1.0