diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
index 78f1e03d21..c13d6c4bb0 100644
--- a/.github/ISSUE_TEMPLATE/bug.yml
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -1,5 +1,6 @@
name: 🐞 Bug Report
description: Tell us about something that's not working the way we (probably) intend.
+labels: ["Python", "Bug"]
body:
- type: dropdown
id: type
diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml
index e462e3bae7..64b31873d8 100644
--- a/.github/ISSUE_TEMPLATE/feature.yml
+++ b/.github/ISSUE_TEMPLATE/feature.yml
@@ -1,6 +1,6 @@
name: 💡 Feature Request
description: Create a feature request for sentry-python SDK.
-labels: 'enhancement'
+labels: ["Python", "Feature"]
body:
- type: markdown
attributes:
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index a0e39a5784..34815da549 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -20,7 +20,7 @@ jobs:
steps:
- name: Get auth token
id: token
- uses: actions/create-github-app-token@3ff1caaa28b64c9cc276ce0a02e2ff584f3900c5 # v2.0.2
+ uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2.0.6
with:
app-id: ${{ vars.SENTRY_RELEASE_BOT_CLIENT_ID }}
private-key: ${{ secrets.SENTRY_RELEASE_BOT_PRIVATE_KEY }}
diff --git a/.github/workflows/test-integrations-ai.yml b/.github/workflows/test-integrations-ai.yml
index f392f57f46..bc89cb9afe 100644
--- a/.github/workflows/test-integrations-ai.yml
+++ b/.github/workflows/test-integrations-ai.yml
@@ -29,7 +29,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: ["3.7","3.9","3.11","3.12"]
+ python-version: ["3.9","3.11","3.12"]
# python3.6 reached EOL and is no longer being supported on
# new versions of hosted runners on Github Actions
# ubuntu-20.04 is the last version that supported python3.6
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 786a9a34e5..81f749d83a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,25 @@
# Changelog
+## 2.28.0
+
+### Various fixes & improvements
+
+- fix(logs): Forward `extra` from logger as attributes (#4374) by @AbhiPrasad
+- fix(logs): Canonicalize paths from the logger integration (#4336) by @colin-sentry
+- fix(logs): Use new transport (#4317) by @colin-sentry
+- fix: Deprecate `set_measurement()` API. (#3934) by @antonpirker
+- fix: Put feature flags on isolation scope (#4363) by @antonpirker
+- fix: Make use of `SPANDATA` consistent (#4373) by @antonpirker
+- fix: Discord link (#4371) by @sentrivana
+- tests: Pin snowballstemmer for now (#4372) by @sentrivana
+- tests: Regular tox update (#4367) by @sentrivana
+- tests: Bump test timeout for recursion stacktrace extract to 2s (#4351) by @booxter
+- tests: Fix test_stacktrace_big_recursion failure due to argv (#4346) by @booxter
+- tests: Move anthropic under toxgen (#4348) by @sentrivana
+- tests: Update tox.ini (#4347) by @sentrivana
+- chore: Update GH issue templates for Linear compatibility (#4328) by @stephanie-anderson
+- chore: Bump actions/create-github-app-token from 2.0.2 to 2.0.6 (#4358) by @dependabot
+
## 2.27.0
### Various fixes & improvements
diff --git a/README.md b/README.md
index 10bc8eb2ed..a3afdc6e72 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@
_Bad software is everywhere, and we're tired of it. Sentry is on a mission to help developers write better software faster, so we can get back to enjoying technology. If you want to join us
[**Check out our open positions**](https://sentry.io/careers/)_.
-[](https://discord.gg/wdNEHETs87)
+[](https://discord.com/invite/Ww9hbqr)
[](https://twitter.com/intent/follow?screen_name=getsentry)
[](https://pypi.python.org/pypi/sentry-sdk)
@@ -106,7 +106,7 @@ If you encounter issues or need help setting up or configuring the SDK, don't he
Here are all resources to help you make the most of Sentry:
- [Documentation](https://docs.sentry.io/platforms/python/) - Official documentation to get started.
-- [Discord](https://img.shields.io/discord/621778831602221064) - Join our Discord community.
+- [Discord](https://discord.com/invite/Ww9hbqr) - Join our Discord community.
- [X/Twitter](https://twitter.com/intent/follow?screen_name=getsentry) - Follow us on X (Twitter) for updates.
- [Stack Overflow](https://stackoverflow.com/questions/tagged/sentry) - Questions and answers related to Sentry.
diff --git a/docs/api.rst b/docs/api.rst
index 87c2535abd..a6fb49346d 100644
--- a/docs/api.rst
+++ b/docs/api.rst
@@ -25,6 +25,7 @@ Capturing Data
Enriching Events
================
+.. autofunction:: sentry_sdk.api.add_attachment
.. autofunction:: sentry_sdk.api.add_breadcrumb
.. autofunction:: sentry_sdk.api.set_context
.. autofunction:: sentry_sdk.api.set_extra
@@ -63,4 +64,3 @@ Managing Scope (advanced)
.. autofunction:: sentry_sdk.api.push_scope
.. autofunction:: sentry_sdk.api.new_scope
-
diff --git a/docs/conf.py b/docs/conf.py
index 709f557d16..34c88ae6cd 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -31,7 +31,7 @@
copyright = "2019-{}, Sentry Team and Contributors".format(datetime.now().year)
author = "Sentry Team and Contributors"
-release = "2.27.0"
+release = "2.28.0"
version = ".".join(release.split(".")[:2]) # The short X.Y version.
diff --git a/requirements-docs.txt b/requirements-docs.txt
index 81e04ba3ef..a662a0d83f 100644
--- a/requirements-docs.txt
+++ b/requirements-docs.txt
@@ -3,3 +3,4 @@ shibuya
sphinx<8.2
sphinx-autodoc-typehints[type_comments]>=1.8.0
typing-extensions
+snowballstemmer<3.0
diff --git a/scripts/populate_tox/config.py b/scripts/populate_tox/config.py
index f874ff8a9c..4d5d5b14ce 100644
--- a/scripts/populate_tox/config.py
+++ b/scripts/populate_tox/config.py
@@ -14,6 +14,14 @@
},
"python": ">=3.7",
},
+ "anthropic": {
+ "package": "anthropic",
+ "deps": {
+ "*": ["pytest-asyncio"],
+ "<0.50": ["httpx<0.28.0"],
+ },
+ "python": ">=3.8",
+ },
"ariadne": {
"package": "ariadne",
"deps": {
diff --git a/scripts/populate_tox/populate_tox.py b/scripts/populate_tox/populate_tox.py
index c04ab1b209..0aeb0f02ef 100644
--- a/scripts/populate_tox/populate_tox.py
+++ b/scripts/populate_tox/populate_tox.py
@@ -67,7 +67,6 @@
"potel",
# Integrations that can be migrated -- we should eventually remove all
# of these from the IGNORE list
- "anthropic",
"arq",
"asyncpg",
"beam",
diff --git a/scripts/populate_tox/tox.jinja b/scripts/populate_tox/tox.jinja
index 3cfb5e1252..2869da275b 100644
--- a/scripts/populate_tox/tox.jinja
+++ b/scripts/populate_tox/tox.jinja
@@ -36,10 +36,6 @@ envlist =
# At a minimum, we should test against at least the lowest
# and the latest supported version of a framework.
- # Anthropic
- {py3.8,py3.11,py3.12}-anthropic-v{0.16,0.28,0.40}
- {py3.7,py3.11,py3.12}-anthropic-latest
-
# Arq
{py3.7,py3.11}-arq-v{0.23}
{py3.7,py3.12,py3.13}-arq-latest
@@ -179,14 +175,6 @@ deps =
# === Integrations ===
- # Anthropic
- anthropic: pytest-asyncio
- anthropic-v{0.16,0.28}: httpx<0.28.0
- anthropic-v0.16: anthropic~=0.16.0
- anthropic-v0.28: anthropic~=0.28.0
- anthropic-v0.40: anthropic~=0.40.0
- anthropic-latest: anthropic
-
# Arq
arq-v0.23: arq~=0.23.0
arq-v0.23: pydantic<2
diff --git a/sentry_sdk/__init__.py b/sentry_sdk/__init__.py
index b4859cc5d2..9fd7253fc2 100644
--- a/sentry_sdk/__init__.py
+++ b/sentry_sdk/__init__.py
@@ -15,6 +15,7 @@
"integrations",
# From sentry_sdk.api
"init",
+ "add_attachment",
"add_breadcrumb",
"capture_event",
"capture_exception",
diff --git a/sentry_sdk/_log_batcher.py b/sentry_sdk/_log_batcher.py
index 77efe29a2c..87bebdb226 100644
--- a/sentry_sdk/_log_batcher.py
+++ b/sentry_sdk/_log_batcher.py
@@ -5,7 +5,7 @@
from typing import Optional, List, Callable, TYPE_CHECKING, Any
from sentry_sdk.utils import format_timestamp, safe_repr
-from sentry_sdk.envelope import Envelope
+from sentry_sdk.envelope import Envelope, Item, PayloadRef
if TYPE_CHECKING:
from sentry_sdk._types import Log
@@ -97,34 +97,36 @@ def flush(self):
self._flush()
@staticmethod
- def _log_to_otel(log):
+ def _log_to_transport_format(log):
# type: (Log) -> Any
- def format_attribute(key, val):
- # type: (str, int | float | str | bool) -> Any
+ def format_attribute(val):
+ # type: (int | float | str | bool) -> Any
if isinstance(val, bool):
- return {"key": key, "value": {"boolValue": val}}
+ return {"value": val, "type": "boolean"}
if isinstance(val, int):
- return {"key": key, "value": {"intValue": str(val)}}
+ return {"value": val, "type": "integer"}
if isinstance(val, float):
- return {"key": key, "value": {"doubleValue": val}}
+ return {"value": val, "type": "double"}
if isinstance(val, str):
- return {"key": key, "value": {"stringValue": val}}
- return {"key": key, "value": {"stringValue": safe_repr(val)}}
-
- otel_log = {
- "severityText": log["severity_text"],
- "severityNumber": log["severity_number"],
- "body": {"stringValue": log["body"]},
- "timeUnixNano": str(log["time_unix_nano"]),
- "attributes": [
- format_attribute(k, v) for (k, v) in log["attributes"].items()
- ],
+ return {"value": val, "type": "string"}
+ return {"value": safe_repr(val), "type": "string"}
+
+ if "sentry.severity_number" not in log["attributes"]:
+ log["attributes"]["sentry.severity_number"] = log["severity_number"]
+ if "sentry.severity_text" not in log["attributes"]:
+ log["attributes"]["sentry.severity_text"] = log["severity_text"]
+
+ res = {
+ "timestamp": int(log["time_unix_nano"]) / 1.0e9,
+ "trace_id": log.get("trace_id", "00000000-0000-0000-0000-000000000000"),
+ "level": str(log["severity_text"]),
+ "body": str(log["body"]),
+ "attributes": {
+ k: format_attribute(v) for (k, v) in log["attributes"].items()
+ },
}
- if "trace_id" in log:
- otel_log["traceId"] = log["trace_id"]
-
- return otel_log
+ return res
def _flush(self):
# type: (...) -> Optional[Envelope]
@@ -133,10 +135,27 @@ def _flush(self):
headers={"sent_at": format_timestamp(datetime.now(timezone.utc))}
)
with self._lock:
- for log in self._log_buffer:
- envelope.add_log(self._log_to_otel(log))
+ if len(self._log_buffer) == 0:
+ return None
+
+ envelope.add_item(
+ Item(
+ type="log",
+ content_type="application/vnd.sentry.items.log+json",
+ headers={
+ "item_count": len(self._log_buffer),
+ },
+ payload=PayloadRef(
+ json={
+ "items": [
+ self._log_to_transport_format(log)
+ for log in self._log_buffer
+ ]
+ }
+ ),
+ )
+ )
self._log_buffer.clear()
- if envelope.items:
- self._capture_func(envelope)
- return envelope
- return None
+
+ self._capture_func(envelope)
+ return envelope
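
For illustration, a minimal sketch of the single-item payload shape produced by `_log_to_transport_format()` above; the field names follow the code, the concrete values are invented.

# Illustrative only -- values are made up for the example.
example_log_item = {
    "timestamp": 1746000000.123,  # time_unix_nano / 1e9, i.e. seconds since the epoch
    "trace_id": "00000000-0000-0000-0000-000000000000",  # default when the log has no trace_id
    "level": "warn",
    "body": "something happened",
    "attributes": {
        "sentry.severity_number": {"value": 13, "type": "integer"},
        "sentry.severity_text": {"value": "warn", "type": "string"},
        "sentry.origin": {"value": "auto.logger.log", "type": "string"},
    },
}
# The buffered items are flushed as a single envelope item with type="log",
# content_type="application/vnd.sentry.items.log+json",
# headers={"item_count": N} and payload {"items": [example_log_item, ...]}.
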
diff --git a/sentry_sdk/ai/monitoring.py b/sentry_sdk/ai/monitoring.py
index 860833b8f5..ed33acd0f1 100644
--- a/sentry_sdk/ai/monitoring.py
+++ b/sentry_sdk/ai/monitoring.py
@@ -1,6 +1,7 @@
import inspect
from functools import wraps
+from sentry_sdk.consts import SPANDATA
import sentry_sdk.utils
from sentry_sdk import start_span
from sentry_sdk.tracing import Span
@@ -39,7 +40,7 @@ def sync_wrapped(*args, **kwargs):
for k, v in kwargs.pop("sentry_data", {}).items():
span.set_data(k, v)
if curr_pipeline:
- span.set_data("ai.pipeline.name", curr_pipeline)
+ span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
return f(*args, **kwargs)
else:
_ai_pipeline_name.set(description)
@@ -68,7 +69,7 @@ async def async_wrapped(*args, **kwargs):
for k, v in kwargs.pop("sentry_data", {}).items():
span.set_data(k, v)
if curr_pipeline:
- span.set_data("ai.pipeline.name", curr_pipeline)
+ span.set_data(SPANDATA.AI_PIPELINE_NAME, curr_pipeline)
return await f(*args, **kwargs)
else:
_ai_pipeline_name.set(description)
@@ -100,7 +101,7 @@ def record_token_usage(
# type: (Span, Optional[int], Optional[int], Optional[int]) -> None
ai_pipeline_name = get_ai_pipeline_name()
if ai_pipeline_name:
- span.set_data("ai.pipeline.name", ai_pipeline_name)
+ span.set_data(SPANDATA.AI_PIPELINE_NAME, ai_pipeline_name)
if prompt_tokens is not None:
span.set_measurement("ai_prompt_tokens_used", value=prompt_tokens)
if completion_tokens is not None:
diff --git a/sentry_sdk/api.py b/sentry_sdk/api.py
index d60434079c..e56109cbd0 100644
--- a/sentry_sdk/api.py
+++ b/sentry_sdk/api.py
@@ -51,6 +51,7 @@ def overload(x):
# When changing this, update __all__ in __init__.py too
__all__ = [
"init",
+ "add_attachment",
"add_breadcrumb",
"capture_event",
"capture_exception",
@@ -184,6 +185,20 @@ def capture_exception(
return get_current_scope().capture_exception(error, scope=scope, **scope_kwargs)
+@scopemethod
+def add_attachment(
+ bytes=None, # type: Union[None, bytes, Callable[[], bytes]]
+ filename=None, # type: Optional[str]
+ path=None, # type: Optional[str]
+ content_type=None, # type: Optional[str]
+ add_to_transactions=False, # type: bool
+):
+ # type: (...) -> None
+ return get_isolation_scope().add_attachment(
+ bytes, filename, path, content_type, add_to_transactions
+ )
+
+
@scopemethod
def add_breadcrumb(
crumb=None, # type: Optional[Breadcrumb]
@@ -388,6 +403,10 @@ def start_transaction(
def set_measurement(name, value, unit=""):
# type: (str, float, MeasurementUnit) -> None
+ """
+ .. deprecated:: 2.28.0
+        This function is deprecated and will be removed in the next major release.
+ """
transaction = get_current_scope().transaction
if transaction is not None:
transaction.set_measurement(name, value, unit)
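
For illustration, a minimal usage sketch of the new top-level `add_attachment()` helper, which forwards to `Scope.add_attachment()` on the isolation scope. The DSN, path and filename below are placeholders, not taken from this diff.

import sentry_sdk

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

# Attach a file from disk, or raw bytes under an explicit filename.
sentry_sdk.add_attachment(path="/tmp/build.log")  # hypothetical path
sentry_sdk.add_attachment(
    bytes=b'{"state": "ok"}',
    filename="state.json",
    content_type="application/json",
)

# Attachments are sent with events captured on the same isolation scope;
# add_to_transactions=True would also include them on transaction envelopes.
sentry_sdk.capture_exception(RuntimeError("boom"))
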
diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py
index e1f18fe4ae..2ceec2738b 100644
--- a/sentry_sdk/consts.py
+++ b/sentry_sdk/consts.py
@@ -187,7 +187,7 @@ class SPANDATA:
For an AI model call, the format of the response
"""
- AI_LOGIT_BIAS = "ai.response_format"
+ AI_LOGIT_BIAS = "ai.logit_bias"
"""
For an AI model call, the logit bias
"""
@@ -204,7 +204,6 @@ class SPANDATA:
Minimize pre-processing done to the prompt sent to the LLM.
Example: true
"""
-
AI_RESPONSES = "ai.responses"
"""
The responses to an AI model call. Always as a list.
@@ -217,6 +216,66 @@ class SPANDATA:
Example: 123.45
"""
+ AI_CITATIONS = "ai.citations"
+ """
+ References or sources cited by the AI model in its response.
+ Example: ["Smith et al. 2020", "Jones 2019"]
+ """
+
+ AI_DOCUMENTS = "ai.documents"
+ """
+ Documents or content chunks used as context for the AI model.
+ Example: ["doc1.txt", "doc2.pdf"]
+ """
+
+ AI_SEARCH_QUERIES = "ai.search_queries"
+ """
+ Queries used to search for relevant context or documents.
+ Example: ["climate change effects", "renewable energy"]
+ """
+
+ AI_SEARCH_RESULTS = "ai.search_results"
+ """
+ Results returned from search queries for context.
+ Example: ["Result 1", "Result 2"]
+ """
+
+ AI_GENERATION_ID = "ai.generation_id"
+ """
+ Unique identifier for the completion.
+ Example: "gen_123abc"
+ """
+
+ AI_SEARCH_REQUIRED = "ai.is_search_required"
+ """
+ Boolean indicating if the model needs to perform a search.
+ Example: true
+ """
+
+ AI_FINISH_REASON = "ai.finish_reason"
+ """
+ The reason why the model stopped generating.
+ Example: "length"
+ """
+
+ AI_PIPELINE_NAME = "ai.pipeline.name"
+ """
+ Name of the AI pipeline or chain being executed.
+ Example: "qa-pipeline"
+ """
+
+ AI_TEXTS = "ai.texts"
+ """
+ Raw text inputs provided to the model.
+ Example: ["What is machine learning?"]
+ """
+
+ AI_WARNINGS = "ai.warnings"
+ """
+ Warning messages generated during model execution.
+ Example: ["Token limit exceeded"]
+ """
+
DB_NAME = "db.name"
"""
The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails).
@@ -966,4 +1025,4 @@ def _get_default_options():
del _get_default_options
-VERSION = "2.27.0"
+VERSION = "2.28.0"
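
For illustration, how the new SPANDATA constants are meant to replace the hard-coded attribute strings used by the integrations below; the span op and values here are made up.

import sentry_sdk
from sentry_sdk.consts import SPANDATA

sentry_sdk.init(traces_sample_rate=1.0)  # DSN omitted for the sketch

with sentry_sdk.start_span(op="ai.chat_completions.create.example") as span:
    # Previously spelled as literal strings such as "ai.streaming" or "ai.finish_reason".
    span.set_data(SPANDATA.AI_MODEL_ID, "some-model")
    span.set_data(SPANDATA.AI_STREAMING, False)
    span.set_data(SPANDATA.AI_FINISH_REASON, "length")
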
diff --git a/sentry_sdk/envelope.py b/sentry_sdk/envelope.py
index 044d282005..5f7220bf21 100644
--- a/sentry_sdk/envelope.py
+++ b/sentry_sdk/envelope.py
@@ -106,12 +106,6 @@ def add_sessions(
# type: (...) -> None
self.add_item(Item(payload=PayloadRef(json=sessions), type="sessions"))
- def add_log(
- self, log # type: Any
- ):
- # type: (...) -> None
- self.add_item(Item(payload=PayloadRef(json=log), type="otel_log"))
-
def add_item(
self, item # type: Item
):
@@ -278,7 +272,7 @@ def data_category(self):
return "transaction"
elif ty == "event":
return "error"
- elif ty == "otel_log":
+ elif ty == "log":
return "log"
elif ty == "client_report":
return "internal"
diff --git a/sentry_sdk/feature_flags.py b/sentry_sdk/feature_flags.py
index dd8d41c32e..eb53acae5d 100644
--- a/sentry_sdk/feature_flags.py
+++ b/sentry_sdk/feature_flags.py
@@ -64,7 +64,7 @@ def add_feature_flag(flag, result):
Records a flag and its value to be sent on subsequent error events.
We recommend you do this on flag evaluations. Flags are buffered per Sentry scope.
"""
- flags = sentry_sdk.get_current_scope().flags
+ flags = sentry_sdk.get_isolation_scope().flags
flags.set(flag, result)
span = sentry_sdk.get_current_span()
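
For illustration, the effect of recording flags on the isolation scope: a flag set inside a span still shows up on the error captured afterwards. The flag name mirrors the tests added later in this diff; the DSN is omitted.

import sentry_sdk
from sentry_sdk.feature_flags import add_feature_flag

sentry_sdk.init(traces_sample_rate=1.0)

with sentry_sdk.start_span(name="request"):
    # Stored on the isolation scope, so it survives the span's forked
    # current scope and is attached to the exception captured below.
    add_feature_flag("hello", False)
    sentry_sdk.capture_exception(ValueError("something wrong!"))
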
diff --git a/sentry_sdk/integrations/cohere.py b/sentry_sdk/integrations/cohere.py
index b4c2af91da..433b285bf0 100644
--- a/sentry_sdk/integrations/cohere.py
+++ b/sentry_sdk/integrations/cohere.py
@@ -52,17 +52,17 @@
}
COLLECTED_CHAT_RESP_ATTRS = {
- "generation_id": "ai.generation_id",
- "is_search_required": "ai.is_search_required",
- "finish_reason": "ai.finish_reason",
+ "generation_id": SPANDATA.AI_GENERATION_ID,
+ "is_search_required": SPANDATA.AI_SEARCH_REQUIRED,
+ "finish_reason": SPANDATA.AI_FINISH_REASON,
}
COLLECTED_PII_CHAT_RESP_ATTRS = {
- "citations": "ai.citations",
- "documents": "ai.documents",
- "search_queries": "ai.search_queries",
- "search_results": "ai.search_results",
- "tool_calls": "ai.tool_calls",
+ "citations": SPANDATA.AI_CITATIONS,
+ "documents": SPANDATA.AI_DOCUMENTS,
+ "search_queries": SPANDATA.AI_SEARCH_QUERIES,
+ "search_results": SPANDATA.AI_SEARCH_RESULTS,
+ "tool_calls": SPANDATA.AI_TOOL_CALLS,
}
@@ -127,7 +127,7 @@ def collect_chat_response_fields(span, res, include_pii):
)
if hasattr(res.meta, "warnings"):
- set_data_normalized(span, "ai.warnings", res.meta.warnings)
+ set_data_normalized(span, SPANDATA.AI_WARNINGS, res.meta.warnings)
@wraps(f)
def new_chat(*args, **kwargs):
@@ -238,7 +238,7 @@ def new_embed(*args, **kwargs):
should_send_default_pii() and integration.include_prompts
):
if isinstance(kwargs["texts"], str):
- set_data_normalized(span, "ai.texts", [kwargs["texts"]])
+ set_data_normalized(span, SPANDATA.AI_TEXTS, [kwargs["texts"]])
elif (
isinstance(kwargs["texts"], list)
and len(kwargs["texts"]) > 0
diff --git a/sentry_sdk/integrations/huggingface_hub.py b/sentry_sdk/integrations/huggingface_hub.py
index d09f6e2163..dfac77e996 100644
--- a/sentry_sdk/integrations/huggingface_hub.py
+++ b/sentry_sdk/integrations/huggingface_hub.py
@@ -97,7 +97,7 @@ def new_text_generation(*args, **kwargs):
if should_send_default_pii() and integration.include_prompts:
set_data_normalized(
span,
- "ai.responses",
+ SPANDATA.AI_RESPONSES,
[res],
)
span.__exit__(None, None, None)
@@ -107,7 +107,7 @@ def new_text_generation(*args, **kwargs):
if should_send_default_pii() and integration.include_prompts:
set_data_normalized(
span,
- "ai.responses",
+ SPANDATA.AI_RESPONSES,
[res.generated_text],
)
if res.details is not None and res.details.generated_tokens > 0:
diff --git a/sentry_sdk/integrations/logging.py b/sentry_sdk/integrations/logging.py
index bf538ac7c7..74baf3d33a 100644
--- a/sentry_sdk/integrations/logging.py
+++ b/sentry_sdk/integrations/logging.py
@@ -348,16 +348,15 @@ def emit(self, record):
if not client.options["_experiments"].get("enable_logs", False):
return
- SentryLogsHandler._capture_log_from_record(client, record)
+ self._capture_log_from_record(client, record)
- @staticmethod
- def _capture_log_from_record(client, record):
+ def _capture_log_from_record(self, client, record):
# type: (BaseClient, LogRecord) -> None
scope = sentry_sdk.get_current_scope()
otel_severity_number, otel_severity_text = _python_level_to_otel(record.levelno)
- attrs = {
- "sentry.origin": "auto.logger.log",
- } # type: dict[str, str | bool | float | int]
+ project_root = client.options["project_root"]
+ attrs = self._extra_from_record(record) # type: Any
+ attrs["sentry.origin"] = "auto.logger.log"
if isinstance(record.msg, str):
attrs["sentry.message.template"] = record.msg
if record.args is not None:
@@ -374,7 +373,10 @@ def _capture_log_from_record(client, record):
if record.lineno:
attrs["code.line.number"] = record.lineno
if record.pathname:
- attrs["code.file.path"] = record.pathname
+ if project_root is not None and record.pathname.startswith(project_root):
+ attrs["code.file.path"] = record.pathname[len(project_root) + 1 :]
+ else:
+ attrs["code.file.path"] = record.pathname
if record.funcName:
attrs["code.function.name"] = record.funcName
diff --git a/sentry_sdk/integrations/openai.py b/sentry_sdk/integrations/openai.py
index 61d335b170..e95753f6e1 100644
--- a/sentry_sdk/integrations/openai.py
+++ b/sentry_sdk/integrations/openai.py
@@ -155,7 +155,7 @@ def _new_chat_completion_common(f, *args, **kwargs):
if should_send_default_pii() and integration.include_prompts:
set_data_normalized(
span,
- "ai.responses",
+ SPANDATA.AI_RESPONSES,
list(map(lambda x: x.message, res.choices)),
)
_calculate_chat_completion_usage(
@@ -329,15 +329,15 @@ def _new_embeddings_create_common(f, *args, **kwargs):
should_send_default_pii() and integration.include_prompts
):
if isinstance(kwargs["input"], str):
- set_data_normalized(span, "ai.input_messages", [kwargs["input"]])
+ set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
elif (
isinstance(kwargs["input"], list)
and len(kwargs["input"]) > 0
and isinstance(kwargs["input"][0], str)
):
- set_data_normalized(span, "ai.input_messages", kwargs["input"])
+ set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
if "model" in kwargs:
- set_data_normalized(span, "ai.model_id", kwargs["model"])
+ set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])
response = yield f, args, kwargs
diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py
index ca249fe8fe..fc40221b9f 100644
--- a/sentry_sdk/tracing.py
+++ b/sentry_sdk/tracing.py
@@ -613,6 +613,16 @@ def set_status(self, value):
def set_measurement(self, name, value, unit=""):
# type: (str, float, MeasurementUnit) -> None
+ """
+ .. deprecated:: 2.28.0
+ This function is deprecated and will be removed in the next major release.
+ """
+
+ warnings.warn(
+ "`set_measurement()` is deprecated and will be removed in the next major version. Please use `set_data()` instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
self._measurements[name] = {"value": value, "unit": unit}
def set_thread(self, thread_id, thread_name):
@@ -1061,6 +1071,16 @@ def finish(
def set_measurement(self, name, value, unit=""):
# type: (str, float, MeasurementUnit) -> None
+ """
+ .. deprecated:: 2.28.0
+ This function is deprecated and will be removed in the next major release.
+ """
+
+ warnings.warn(
+ "`set_measurement()` is deprecated and will be removed in the next major version. Please use `set_data()` instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
self._measurements[name] = {"value": value, "unit": unit}
def set_context(self, key, value):
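
For illustration, the migration suggested by the deprecation warning above, mirroring the comparison test added later in this diff: replace `set_measurement()` with `set_data()` on the transaction or span.

import sentry_sdk
from sentry_sdk import start_transaction

sentry_sdk.init(traces_sample_rate=1.0)  # DSN omitted for the sketch

with start_transaction(name="measuring stuff") as transaction:
    transaction.set_measurement("metric.foo", 123)  # now emits a DeprecationWarning
    transaction.set_data("metric.bar", 456)         # suggested replacement
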
diff --git a/setup.py b/setup.py
index 877585472b..8fd1ae6293 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ def get_file_text(file_name):
setup(
name="sentry-sdk",
- version="2.27.0",
+ version="2.28.0",
author="Sentry Team and Contributors",
author_email="hello@sentry.io",
url="https://github.com/getsentry/sentry-python",
diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py
index 7f6622a1ba..9ab0f879d1 100644
--- a/tests/integrations/anthropic/test_anthropic.py
+++ b/tests/integrations/anthropic/test_anthropic.py
@@ -128,7 +128,7 @@ def test_nonstreaming_create_message(
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
- assert span["data"]["ai.streaming"] is False
+ assert span["data"][SPANDATA.AI_STREAMING] is False
@pytest.mark.asyncio
@@ -196,7 +196,7 @@ async def test_nonstreaming_create_message_async(
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
- assert span["data"]["ai.streaming"] is False
+ assert span["data"][SPANDATA.AI_STREAMING] is False
@pytest.mark.parametrize(
@@ -296,7 +296,7 @@ def test_streaming_create_message(
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
- assert span["data"]["ai.streaming"] is True
+ assert span["data"][SPANDATA.AI_STREAMING] is True
@pytest.mark.asyncio
@@ -399,7 +399,7 @@ async def test_streaming_create_message_async(
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 30
assert span["measurements"]["ai_total_tokens_used"]["value"] == 40
- assert span["data"]["ai.streaming"] is True
+ assert span["data"][SPANDATA.AI_STREAMING] is True
@pytest.mark.skipif(
@@ -528,7 +528,7 @@ def test_streaming_create_message_with_input_json_delta(
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 366
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 51
assert span["measurements"]["ai_total_tokens_used"]["value"] == 417
- assert span["data"]["ai.streaming"] is True
+ assert span["data"][SPANDATA.AI_STREAMING] is True
@pytest.mark.asyncio
@@ -665,7 +665,7 @@ async def test_streaming_create_message_with_input_json_delta_async(
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 366
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 51
assert span["measurements"]["ai_total_tokens_used"]["value"] == 417
- assert span["data"]["ai.streaming"] is True
+ assert span["data"][SPANDATA.AI_STREAMING] is True
def test_exception_message_create(sentry_init, capture_events):
@@ -810,7 +810,7 @@ def test_add_ai_data_to_span_with_input_json_delta(sentry_init):
assert span._data.get(SPANDATA.AI_RESPONSES) == [
{"type": "text", "text": "{'test': 'data','more': 'json'}"}
]
- assert span._data.get("ai.streaming") is True
+ assert span._data.get(SPANDATA.AI_STREAMING) is True
assert span._measurements.get("ai_prompt_tokens_used")["value"] == 10
assert span._measurements.get("ai_completion_tokens_used")["value"] == 20
assert span._measurements.get("ai_total_tokens_used")["value"] == 30
diff --git a/tests/integrations/cohere/test_cohere.py b/tests/integrations/cohere/test_cohere.py
index c0dff2214e..6c1185a28e 100644
--- a/tests/integrations/cohere/test_cohere.py
+++ b/tests/integrations/cohere/test_cohere.py
@@ -5,6 +5,7 @@
from cohere import Client, ChatMessage
from sentry_sdk import start_transaction
+from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations.cohere import CohereIntegration
from unittest import mock # python 3.3 and above
@@ -53,15 +54,15 @@ def test_nonstreaming_chat(
assert tx["type"] == "transaction"
span = tx["spans"][0]
assert span["op"] == "ai.chat_completions.create.cohere"
- assert span["data"]["ai.model_id"] == "some-model"
+ assert span["data"][SPANDATA.AI_MODEL_ID] == "some-model"
if send_default_pii and include_prompts:
- assert "some context" in span["data"]["ai.input_messages"][0]["content"]
- assert "hello" in span["data"]["ai.input_messages"][1]["content"]
- assert "the model response" in span["data"]["ai.responses"]
+ assert "some context" in span["data"][SPANDATA.AI_INPUT_MESSAGES][0]["content"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES][1]["content"]
+ assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]
else:
- assert "ai.input_messages" not in span["data"]
- assert "ai.responses" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+ assert SPANDATA.AI_RESPONSES not in span["data"]
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
@@ -124,15 +125,15 @@ def test_streaming_chat(sentry_init, capture_events, send_default_pii, include_p
assert tx["type"] == "transaction"
span = tx["spans"][0]
assert span["op"] == "ai.chat_completions.create.cohere"
- assert span["data"]["ai.model_id"] == "some-model"
+ assert span["data"][SPANDATA.AI_MODEL_ID] == "some-model"
if send_default_pii and include_prompts:
- assert "some context" in span["data"]["ai.input_messages"][0]["content"]
- assert "hello" in span["data"]["ai.input_messages"][1]["content"]
- assert "the model response" in span["data"]["ai.responses"]
+ assert "some context" in span["data"][SPANDATA.AI_INPUT_MESSAGES][0]["content"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES][1]["content"]
+ assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]
else:
- assert "ai.input_messages" not in span["data"]
- assert "ai.responses" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+ assert SPANDATA.AI_RESPONSES not in span["data"]
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
@@ -194,9 +195,9 @@ def test_embed(sentry_init, capture_events, send_default_pii, include_prompts):
span = tx["spans"][0]
assert span["op"] == "ai.embeddings.create.cohere"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]
else:
- assert "ai.input_messages" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 10
assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
diff --git a/tests/integrations/fastapi/test_fastapi.py b/tests/integrations/fastapi/test_fastapi.py
index 95838b1009..3d79da92cc 100644
--- a/tests/integrations/fastapi/test_fastapi.py
+++ b/tests/integrations/fastapi/test_fastapi.py
@@ -10,7 +10,9 @@
from fastapi.testclient import TestClient
from fastapi.middleware.trustedhost import TrustedHostMiddleware
+import sentry_sdk
from sentry_sdk import capture_message
+from sentry_sdk.feature_flags import add_feature_flag
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.integrations.fastapi import FastApiIntegration
from sentry_sdk.integrations.starlette import StarletteIntegration
@@ -714,3 +716,41 @@ async def subapp_route():
assert event["transaction"] == "/subapp"
else:
assert event["transaction"].endswith("subapp_route")
+
+
+@pytest.mark.asyncio
+async def test_feature_flags(sentry_init, capture_events):
+ sentry_init(
+ traces_sample_rate=1.0,
+ integrations=[StarletteIntegration(), FastApiIntegration()],
+ )
+
+ events = capture_events()
+
+ app = FastAPI()
+
+ @app.get("/error")
+ async def _error():
+ add_feature_flag("hello", False)
+
+ with sentry_sdk.start_span(name="test-span"):
+ with sentry_sdk.start_span(name="test-span-2"):
+ raise ValueError("something is wrong!")
+
+ try:
+ client = TestClient(app)
+ client.get("/error")
+ except ValueError:
+ pass
+
+ found = False
+ for event in events:
+ if "exception" in event.keys():
+ assert event["contexts"]["flags"] == {
+ "values": [
+ {"flag": "hello", "result": False},
+ ]
+ }
+ found = True
+
+ assert found, "No event with exception found"
diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py b/tests/integrations/huggingface_hub/test_huggingface_hub.py
index 090b0e4f3e..ee47cc7e56 100644
--- a/tests/integrations/huggingface_hub/test_huggingface_hub.py
+++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py
@@ -8,6 +8,7 @@
from huggingface_hub.errors import OverloadedError
from sentry_sdk import start_transaction
+from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations.huggingface_hub import HuggingfaceHubIntegration
@@ -67,11 +68,11 @@ def test_nonstreaming_chat_completion(
assert span["op"] == "ai.chat_completions.create.huggingface_hub"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]
- assert "the model response" in span["data"]["ai.responses"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]
+ assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]
else:
- assert "ai.input_messages" not in span["data"]
- assert "ai.responses" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+ assert SPANDATA.AI_RESPONSES not in span["data"]
if details_arg:
assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
@@ -126,11 +127,11 @@ def test_streaming_chat_completion(
assert span["op"] == "ai.chat_completions.create.huggingface_hub"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]
- assert "the model response" in span["data"]["ai.responses"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]
+ assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]
else:
- assert "ai.input_messages" not in span["data"]
- assert "ai.responses" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+ assert SPANDATA.AI_RESPONSES not in span["data"]
if details_arg:
assert span["measurements"]["ai_total_tokens_used"]["value"] == 10
diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py
index b9e5705b88..3f1b3b1da5 100644
--- a/tests/integrations/langchain/test_langchain.py
+++ b/tests/integrations/langchain/test_langchain.py
@@ -3,6 +3,8 @@
import pytest
+from sentry_sdk.consts import SPANDATA
+
try:
# Langchain >= 0.2
from langchain_openai import ChatOpenAI
@@ -189,23 +191,23 @@ def test_langchain_agent(
if send_default_pii and include_prompts:
assert (
"You are very powerful"
- in chat_spans[0]["data"]["ai.input_messages"][0]["content"]
+ in chat_spans[0]["data"][SPANDATA.AI_INPUT_MESSAGES][0]["content"]
)
- assert "5" in chat_spans[0]["data"]["ai.responses"]
- assert "word" in tool_exec_span["data"]["ai.input_messages"]
- assert 5 == int(tool_exec_span["data"]["ai.responses"])
+ assert "5" in chat_spans[0]["data"][SPANDATA.AI_RESPONSES]
+ assert "word" in tool_exec_span["data"][SPANDATA.AI_INPUT_MESSAGES]
+ assert 5 == int(tool_exec_span["data"][SPANDATA.AI_RESPONSES])
assert (
"You are very powerful"
- in chat_spans[1]["data"]["ai.input_messages"][0]["content"]
+ in chat_spans[1]["data"][SPANDATA.AI_INPUT_MESSAGES][0]["content"]
)
- assert "5" in chat_spans[1]["data"]["ai.responses"]
+ assert "5" in chat_spans[1]["data"][SPANDATA.AI_RESPONSES]
else:
- assert "ai.input_messages" not in chat_spans[0].get("data", {})
- assert "ai.responses" not in chat_spans[0].get("data", {})
- assert "ai.input_messages" not in chat_spans[1].get("data", {})
- assert "ai.responses" not in chat_spans[1].get("data", {})
- assert "ai.input_messages" not in tool_exec_span.get("data", {})
- assert "ai.responses" not in tool_exec_span.get("data", {})
+ assert SPANDATA.AI_INPUT_MESSAGES not in chat_spans[0].get("data", {})
+ assert SPANDATA.AI_RESPONSES not in chat_spans[0].get("data", {})
+ assert SPANDATA.AI_INPUT_MESSAGES not in chat_spans[1].get("data", {})
+ assert SPANDATA.AI_RESPONSES not in chat_spans[1].get("data", {})
+ assert SPANDATA.AI_INPUT_MESSAGES not in tool_exec_span.get("data", {})
+ assert SPANDATA.AI_RESPONSES not in tool_exec_span.get("data", {})
def test_langchain_error(sentry_init, capture_events):
diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py
index 011192e49f..3fdc138f39 100644
--- a/tests/integrations/openai/test_openai.py
+++ b/tests/integrations/openai/test_openai.py
@@ -7,6 +7,7 @@
from openai.types.create_embedding_response import Usage as EmbeddingTokenUsage
from sentry_sdk import start_transaction
+from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations.openai import (
OpenAIIntegration,
_calculate_chat_completion_usage,
@@ -83,11 +84,11 @@ def test_nonstreaming_chat_completion(
assert span["op"] == "ai.chat_completions.create.openai"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]["content"]
- assert "the model response" in span["data"]["ai.responses"]["content"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]["content"]
+ assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]["content"]
else:
- assert "ai.input_messages" not in span["data"]
- assert "ai.responses" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+ assert SPANDATA.AI_RESPONSES not in span["data"]
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
@@ -125,11 +126,11 @@ async def test_nonstreaming_chat_completion_async(
assert span["op"] == "ai.chat_completions.create.openai"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]["content"]
- assert "the model response" in span["data"]["ai.responses"]["content"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]["content"]
+ assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]["content"]
else:
- assert "ai.input_messages" not in span["data"]
- assert "ai.responses" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+ assert SPANDATA.AI_RESPONSES not in span["data"]
assert span["measurements"]["ai_completion_tokens_used"]["value"] == 10
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
@@ -218,11 +219,11 @@ def test_streaming_chat_completion(
assert span["op"] == "ai.chat_completions.create.openai"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]["content"]
- assert "hello world" in span["data"]["ai.responses"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]["content"]
+ assert "hello world" in span["data"][SPANDATA.AI_RESPONSES]
else:
- assert "ai.input_messages" not in span["data"]
- assert "ai.responses" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+ assert SPANDATA.AI_RESPONSES not in span["data"]
try:
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
@@ -314,11 +315,11 @@ async def test_streaming_chat_completion_async(
assert span["op"] == "ai.chat_completions.create.openai"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]["content"]
- assert "hello world" in span["data"]["ai.responses"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]["content"]
+ assert "hello world" in span["data"][SPANDATA.AI_RESPONSES]
else:
- assert "ai.input_messages" not in span["data"]
- assert "ai.responses" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
+ assert SPANDATA.AI_RESPONSES not in span["data"]
try:
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
@@ -404,9 +405,9 @@ def test_embeddings_create(
span = tx["spans"][0]
assert span["op"] == "ai.embeddings.create.openai"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]
else:
- assert "ai.input_messages" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
@@ -452,9 +453,9 @@ async def test_embeddings_create_async(
span = tx["spans"][0]
assert span["op"] == "ai.embeddings.create.openai"
if send_default_pii and include_prompts:
- assert "hello" in span["data"]["ai.input_messages"]
+ assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]
else:
- assert "ai.input_messages" not in span["data"]
+ assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
assert span["measurements"]["ai_prompt_tokens_used"]["value"] == 20
assert span["measurements"]["ai_total_tokens_used"]["value"] == 30
diff --git a/tests/test_basics.py b/tests/test_basics.py
index 94ced5013a..0fdf9f811f 100644
--- a/tests/test_basics.py
+++ b/tests/test_basics.py
@@ -1151,14 +1151,12 @@ def recurse():
(event,) = events
assert event["exception"]["values"][0]["stacktrace"] is None
- assert event["_meta"] == {
- "exception": {
- "values": {"0": {"stacktrace": {"": {"rem": [["!config", "x"]]}}}}
- }
+ assert event["_meta"]["exception"] == {
+ "values": {"0": {"stacktrace": {"": {"rem": [["!config", "x"]]}}}}
}
# On my machine, it takes about 100-200ms to capture the exception,
# so this limit should be generous enough.
assert (
- capture_end_time - capture_start_time < 10**9
+ capture_end_time - capture_start_time < 10**9 * 2
), "stacktrace capture took too long, check that frame limit is set correctly"
diff --git a/tests/test_feature_flags.py b/tests/test_feature_flags.py
index 1b0ed13d49..e0ab1e254e 100644
--- a/tests/test_feature_flags.py
+++ b/tests/test_feature_flags.py
@@ -31,6 +31,63 @@ def test_featureflags_integration(sentry_init, capture_events, uninstall_integra
}
+@pytest.mark.asyncio
+async def test_featureflags_integration_spans_async(sentry_init, capture_events):
+ sentry_init(
+ traces_sample_rate=1.0,
+ )
+ events = capture_events()
+
+ add_feature_flag("hello", False)
+
+ try:
+ with sentry_sdk.start_span(name="test-span"):
+ with sentry_sdk.start_span(name="test-span-2"):
+ raise ValueError("something wrong!")
+ except ValueError as e:
+ sentry_sdk.capture_exception(e)
+
+ found = False
+ for event in events:
+ if "exception" in event.keys():
+ assert event["contexts"]["flags"] == {
+ "values": [
+ {"flag": "hello", "result": False},
+ ]
+ }
+ found = True
+
+ assert found, "No event with exception found"
+
+
+def test_featureflags_integration_spans_sync(sentry_init, capture_events):
+ sentry_init(
+ traces_sample_rate=1.0,
+ )
+ events = capture_events()
+
+ add_feature_flag("hello", False)
+
+ try:
+ with sentry_sdk.start_span(name="test-span"):
+ with sentry_sdk.start_span(name="test-span-2"):
+ raise ValueError("something wrong!")
+ except ValueError as e:
+ sentry_sdk.capture_exception(e)
+
+ found = False
+ for event in events:
+ if "exception" in event.keys():
+ assert event["contexts"]["flags"] == {
+ "values": [
+ {"flag": "hello", "result": False},
+ ]
+ }
+ found = True
+
+ assert found, "No event with exception found"
+
+
def test_featureflags_integration_threaded(
sentry_init, capture_events, uninstall_integration
):
diff --git a/tests/test_logs.py b/tests/test_logs.py
index 5ede277e3b..1f6b07e762 100644
--- a/tests/test_logs.py
+++ b/tests/test_logs.py
@@ -19,42 +19,44 @@
def otel_attributes_to_dict(otel_attrs):
- # type: (List[Mapping[str, Any]]) -> Mapping[str, Any]
+ # type: (Mapping[str, Any]) -> Mapping[str, Any]
def _convert_attr(attr):
# type: (Mapping[str, Union[str, float, bool]]) -> Any
- if "boolValue" in attr:
- return bool(attr["boolValue"])
- if "doubleValue" in attr:
- return float(attr["doubleValue"])
- if "intValue" in attr:
- return int(attr["intValue"])
- if attr["stringValue"].startswith("{"):
+ if attr["type"] == "boolean":
+ return attr["value"]
+ if attr["type"] == "double":
+ return attr["value"]
+ if attr["type"] == "integer":
+ return attr["value"]
+ if attr["value"].startswith("{"):
try:
- return json.loads(attr["stringValue"])
+ return json.loads(attr["value"])
except ValueError:
pass
- return str(attr["stringValue"])
+ return str(attr["value"])
- return {item["key"]: _convert_attr(item["value"]) for item in otel_attrs}
+ return {k: _convert_attr(v) for (k, v) in otel_attrs.items()}
def envelopes_to_logs(envelopes: List[Envelope]) -> List[Log]:
res = [] # type: List[Log]
for envelope in envelopes:
for item in envelope.items:
- if item.type == "otel_log":
- log_json = item.payload.json
- log = {
- "severity_text": log_json["severityText"],
- "severity_number": log_json["severityNumber"],
- "body": log_json["body"]["stringValue"],
- "attributes": otel_attributes_to_dict(log_json["attributes"]),
- "time_unix_nano": int(log_json["timeUnixNano"]),
- "trace_id": None,
- } # type: Log
- if "traceId" in log_json:
- log["trace_id"] = log_json["traceId"]
- res.append(log)
+ if item.type == "log":
+ for log_json in item.payload.json["items"]:
+ log = {
+ "severity_text": log_json["attributes"]["sentry.severity_text"][
+ "value"
+ ],
+ "severity_number": int(
+ log_json["attributes"]["sentry.severity_number"]["value"]
+ ),
+ "body": log_json["body"],
+ "attributes": otel_attributes_to_dict(log_json["attributes"]),
+ "time_unix_nano": int(float(log_json["timestamp"]) * 1e9),
+ "trace_id": log_json["trace_id"],
+ } # type: Log
+ res.append(log)
return res
@@ -344,7 +346,6 @@ def test_logging_errors(sentry_init, capture_envelopes):
error_event_2 = envelopes[1].items[0].payload.json
assert error_event_2["level"] == "error"
- print(envelopes)
logs = envelopes_to_logs(envelopes)
assert logs[0]["severity_text"] == "error"
assert "sentry.message.template" not in logs[0]["attributes"]
@@ -362,6 +363,108 @@ def test_logging_errors(sentry_init, capture_envelopes):
assert len(logs) == 2
+def test_log_strips_project_root(sentry_init, capture_envelopes):
+ """
+ The python logger should strip project roots from the log record path
+ """
+ sentry_init(
+ _experiments={"enable_logs": True},
+ project_root="/custom/test",
+ )
+ envelopes = capture_envelopes()
+
+ python_logger = logging.Logger("test-logger")
+ python_logger.handle(
+ logging.LogRecord(
+ name="test-logger",
+ level=logging.WARN,
+ pathname="/custom/test/blah/path.py",
+ lineno=123,
+ msg="This is a test log with a custom pathname",
+ args=(),
+ exc_info=None,
+ )
+ )
+ get_client().flush()
+
+ logs = envelopes_to_logs(envelopes)
+ assert len(logs) == 1
+ attrs = logs[0]["attributes"]
+ assert attrs["code.file.path"] == "blah/path.py"
+
+
+def test_logger_with_all_attributes(sentry_init, capture_envelopes):
+ """
+ The python logger should be able to log all attributes, including extra data.
+ """
+ sentry_init(_experiments={"enable_logs": True})
+ envelopes = capture_envelopes()
+
+ python_logger = logging.Logger("test-logger")
+ python_logger.warning(
+ "log #%d",
+ 1,
+ extra={"foo": "bar", "numeric": 42, "more_complex": {"nested": "data"}},
+ )
+ get_client().flush()
+
+ logs = envelopes_to_logs(envelopes)
+
+ attributes = logs[0]["attributes"]
+
+ assert "process.pid" in attributes
+ assert isinstance(attributes["process.pid"], int)
+ del attributes["process.pid"]
+
+ assert "sentry.release" in attributes
+ assert isinstance(attributes["sentry.release"], str)
+ del attributes["sentry.release"]
+
+ assert "server.address" in attributes
+ assert isinstance(attributes["server.address"], str)
+ del attributes["server.address"]
+
+ assert "thread.id" in attributes
+ assert isinstance(attributes["thread.id"], int)
+ del attributes["thread.id"]
+
+ assert "code.file.path" in attributes
+ assert isinstance(attributes["code.file.path"], str)
+ del attributes["code.file.path"]
+
+ assert "code.function.name" in attributes
+ assert isinstance(attributes["code.function.name"], str)
+ del attributes["code.function.name"]
+
+ assert "code.line.number" in attributes
+ assert isinstance(attributes["code.line.number"], int)
+ del attributes["code.line.number"]
+
+ assert "process.executable.name" in attributes
+ assert isinstance(attributes["process.executable.name"], str)
+ del attributes["process.executable.name"]
+
+ assert "thread.name" in attributes
+ assert isinstance(attributes["thread.name"], str)
+ del attributes["thread.name"]
+
+ # Assert on the remaining non-dynamic attributes.
+ assert attributes == {
+ "foo": "bar",
+ "numeric": 42,
+ "more_complex": "{'nested': 'data'}",
+ "logger.name": "test-logger",
+ "sentry.origin": "auto.logger.log",
+ "sentry.message.template": "log #%d",
+ "sentry.message.parameters.0": 1,
+ "sentry.environment": "production",
+ "sentry.sdk.name": "sentry.python",
+ "sentry.sdk.version": VERSION,
+ "sentry.severity_number": 13,
+ "sentry.severity_text": "warn",
+ }
+
+
def test_auto_flush_logs_after_100(sentry_init, capture_envelopes):
"""
If you log >100 logs, it should automatically trigger a flush.
diff --git a/tests/tracing/test_misc.py b/tests/tracing/test_misc.py
index 040fb24213..b954d36e1a 100644
--- a/tests/tracing/test_misc.py
+++ b/tests/tracing/test_misc.py
@@ -323,6 +323,48 @@ def test_set_meaurement_public_api(sentry_init, capture_events):
assert event["measurements"]["metric.bar"] == {"value": 456, "unit": "second"}
+def test_set_measurement_deprecated(sentry_init):
+ sentry_init(traces_sample_rate=1.0)
+
+ with start_transaction(name="measuring stuff") as trx:
+ with pytest.warns(DeprecationWarning):
+ set_measurement("metric.foo", 123)
+
+ with pytest.warns(DeprecationWarning):
+ trx.set_measurement("metric.bar", 456)
+
+ with start_span(op="measuring span") as span:
+ with pytest.warns(DeprecationWarning):
+ span.set_measurement("metric.baz", 420.69, unit="custom")
+
+
+def test_set_measurement_compared_to_set_data(sentry_init, capture_events):
+ """
+ This is just a test to see the difference
+ between measurements and data in the resulting event payload.
+ """
+ sentry_init(traces_sample_rate=1.0)
+
+ events = capture_events()
+
+ with start_transaction(name="measuring stuff") as transaction:
+ transaction.set_measurement("metric.foo", 123)
+ transaction.set_data("metric.bar", 456)
+
+ with start_span(op="measuring span") as span:
+ span.set_measurement("metric.baz", 420.69, unit="custom")
+ span.set_data("metric.qux", 789)
+
+ (event,) = events
+ assert event["measurements"]["metric.foo"] == {"value": 123, "unit": ""}
+ assert event["contexts"]["trace"]["data"]["metric.bar"] == 456
+ assert event["spans"][0]["measurements"]["metric.baz"] == {
+ "value": 420.69,
+ "unit": "custom",
+ }
+ assert event["spans"][0]["data"]["metric.qux"] == 789
+
+
@pytest.mark.parametrize(
"trace_propagation_targets,url,expected_propagation_decision",
[
diff --git a/tox.ini b/tox.ini
index 6f3b9863e8..332f541793 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,7 +10,7 @@
# The file (and all resulting CI YAMLs) then need to be regenerated via
# "scripts/generate-test-files.sh".
#
-# Last generated: 2025-04-23T08:07:00.653648+00:00
+# Last generated: 2025-05-06T10:23:50.156629+00:00
[tox]
requires =
@@ -36,10 +36,6 @@ envlist =
# At a minimum, we should test against at least the lowest
# and the latest supported version of a framework.
- # Anthropic
- {py3.8,py3.11,py3.12}-anthropic-v{0.16,0.28,0.40}
- {py3.7,py3.11,py3.12}-anthropic-latest
-
# Arq
{py3.7,py3.11}-arq-v{0.23}
{py3.7,py3.12,py3.13}-arq-latest
@@ -139,6 +135,11 @@ envlist =
# integration tests there.
# ~~~ AI ~~~
+ {py3.8,py3.11,py3.12}-anthropic-v0.16.0
+ {py3.8,py3.11,py3.12}-anthropic-v0.27.0
+ {py3.8,py3.11,py3.12}-anthropic-v0.38.0
+ {py3.8,py3.11,py3.12}-anthropic-v0.50.0
+
{py3.9,py3.10,py3.11}-cohere-v5.4.0
{py3.9,py3.11,py3.12}-cohere-v5.8.1
{py3.9,py3.11,py3.12}-cohere-v5.11.4
@@ -156,7 +157,7 @@ envlist =
{py3.6}-pymongo-v3.5.1
{py3.6,py3.10,py3.11}-pymongo-v3.13.0
{py3.6,py3.9,py3.10}-pymongo-v4.0.2
- {py3.9,py3.12,py3.13}-pymongo-v4.12.0
+ {py3.9,py3.12,py3.13}-pymongo-v4.12.1
{py3.6}-redis_py_cluster_legacy-v1.3.6
{py3.6,py3.7}-redis_py_cluster_legacy-v2.0.0
@@ -215,7 +216,7 @@ envlist =
# ~~~ Tasks ~~~
{py3.6,py3.7,py3.8}-celery-v4.4.7
{py3.6,py3.7,py3.8}-celery-v5.0.5
- {py3.8,py3.12,py3.13}-celery-v5.5.1
+ {py3.8,py3.12,py3.13}-celery-v5.5.2
{py3.6,py3.7}-dramatiq-v1.9.0
{py3.6,py3.8,py3.9}-dramatiq-v1.12.3
@@ -274,7 +275,7 @@ envlist =
{py3.8,py3.10,py3.11}-litestar-v2.0.1
{py3.8,py3.11,py3.12}-litestar-v2.5.5
{py3.8,py3.11,py3.12}-litestar-v2.10.0
- {py3.8,py3.12,py3.13}-litestar-v2.15.2
+ {py3.8,py3.12,py3.13}-litestar-v2.16.0
{py3.6}-pyramid-v1.8.6
{py3.6,py3.8,py3.9}-pyramid-v1.10.8
@@ -289,6 +290,7 @@ envlist =
{py3.6,py3.8,py3.9}-tornado-v6.1
{py3.7,py3.9,py3.10}-tornado-v6.2
{py3.8,py3.10,py3.11}-tornado-v6.4.2
+ {py3.9,py3.12,py3.13}-tornado-v6.5b1
# ~~~ Misc ~~~
@@ -298,10 +300,10 @@ envlist =
{py3.6}-trytond-v4.8.18
{py3.6,py3.7,py3.8}-trytond-v5.8.16
{py3.8,py3.10,py3.11}-trytond-v6.8.17
- {py3.8,py3.11,py3.12}-trytond-v7.0.29
- {py3.8,py3.11,py3.12}-trytond-v7.4.9
+ {py3.8,py3.11,py3.12}-trytond-v7.0.31
+ {py3.9,py3.12,py3.13}-trytond-v7.6.0
- {py3.7,py3.12,py3.13}-typer-v0.15.2
+ {py3.7,py3.12,py3.13}-typer-v0.15.3
@@ -335,14 +337,6 @@ deps =
# === Integrations ===
- # Anthropic
- anthropic: pytest-asyncio
- anthropic-v{0.16,0.28}: httpx<0.28.0
- anthropic-v0.16: anthropic~=0.16.0
- anthropic-v0.28: anthropic~=0.28.0
- anthropic-v0.40: anthropic~=0.40.0
- anthropic-latest: anthropic
-
# Arq
arq-v0.23: arq~=0.23.0
arq-v0.23: pydantic<2
@@ -506,6 +500,15 @@ deps =
# integration tests there.
# ~~~ AI ~~~
+ anthropic-v0.16.0: anthropic==0.16.0
+ anthropic-v0.27.0: anthropic==0.27.0
+ anthropic-v0.38.0: anthropic==0.38.0
+ anthropic-v0.50.0: anthropic==0.50.0
+ anthropic: pytest-asyncio
+ anthropic-v0.16.0: httpx<0.28.0
+ anthropic-v0.27.0: httpx<0.28.0
+ anthropic-v0.38.0: httpx<0.28.0
+
cohere-v5.4.0: cohere==5.4.0
cohere-v5.8.1: cohere==5.8.1
cohere-v5.11.4: cohere==5.11.4
@@ -523,7 +526,7 @@ deps =
pymongo-v3.5.1: pymongo==3.5.1
pymongo-v3.13.0: pymongo==3.13.0
pymongo-v4.0.2: pymongo==4.0.2
- pymongo-v4.12.0: pymongo==4.12.0
+ pymongo-v4.12.1: pymongo==4.12.1
pymongo: mockupdb
redis_py_cluster_legacy-v1.3.6: redis-py-cluster==1.3.6
@@ -600,7 +603,7 @@ deps =
# ~~~ Tasks ~~~
celery-v4.4.7: celery==4.4.7
celery-v5.0.5: celery==5.0.5
- celery-v5.5.1: celery==5.5.1
+ celery-v5.5.2: celery==5.5.2
celery: newrelic
celery: redis
py3.7-celery: importlib-metadata<5.0
@@ -711,7 +714,7 @@ deps =
litestar-v2.0.1: litestar==2.0.1
litestar-v2.5.5: litestar==2.5.5
litestar-v2.10.0: litestar==2.10.0
- litestar-v2.15.2: litestar==2.15.2
+ litestar-v2.16.0: litestar==2.16.0
litestar: pytest-asyncio
litestar: python-multipart
litestar: requests
@@ -739,6 +742,7 @@ deps =
tornado-v6.1: tornado==6.1
tornado-v6.2: tornado==6.2
tornado-v6.4.2: tornado==6.4.2
+ tornado-v6.5b1: tornado==6.5b1
tornado: pytest
tornado-v6.0.4: pytest<8.2
tornado-v6.1: pytest<8.2
@@ -753,13 +757,13 @@ deps =
trytond-v4.8.18: trytond==4.8.18
trytond-v5.8.16: trytond==5.8.16
trytond-v6.8.17: trytond==6.8.17
- trytond-v7.0.29: trytond==7.0.29
- trytond-v7.4.9: trytond==7.4.9
+ trytond-v7.0.31: trytond==7.0.31
+ trytond-v7.6.0: trytond==7.6.0
trytond: werkzeug
trytond-v4.6.22: werkzeug<1.0
trytond-v4.8.18: werkzeug<1.0
- typer-v0.15.2: typer==0.15.2
+ typer-v0.15.3: typer==0.15.3