diff --git a/sentry_sdk/_types.py b/sentry_sdk/_types.py index baf5f6a2fd..814b90c440 100644 --- a/sentry_sdk/_types.py +++ b/sentry_sdk/_types.py @@ -209,6 +209,7 @@ class SDKInfo(TypedDict): "type": Literal["check_in", "transaction"], "user": dict[str, object], "_dropped_spans": int, + "_has_gen_ai_span": bool, }, total=False, ) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index 9f795d2489..fd102e0679 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -2,11 +2,12 @@ import uuid import random import socket -from collections.abc import Mapping +from collections.abc import Mapping, Iterable from datetime import datetime, timezone from importlib import import_module from typing import TYPE_CHECKING, List, Dict, cast, overload import warnings +import json from sentry_sdk._compat import check_uwsgi_thread_support from sentry_sdk._metrics_batcher import MetricsBatcher @@ -30,6 +31,7 @@ ) from sentry_sdk.serializer import serialize from sentry_sdk.tracing import trace +from sentry_sdk.traces import SpanStatus from sentry_sdk.tracing_utils import has_span_streaming_enabled from sentry_sdk.transport import ( HttpTransportCore, @@ -38,6 +40,7 @@ ) from sentry_sdk.consts import ( SPANDATA, + SPANSTATUS, DEFAULT_MAX_VALUE_LENGTH, DEFAULT_OPTIONS, INSTRUMENTER, @@ -56,6 +59,8 @@ ) from sentry_sdk.scrubber import EventScrubber from sentry_sdk.monitor import Monitor +from sentry_sdk.envelope import Item, PayloadRef +from sentry_sdk.utils import datetime_from_isoformat if TYPE_CHECKING: from typing import Any @@ -66,7 +71,15 @@ from typing import Union from typing import TypeVar - from sentry_sdk._types import Event, Hint, SDKInfo, Log, Metric, EventDataCategory + from sentry_sdk._types import ( + Event, + Hint, + SDKInfo, + Log, + Metric, + EventDataCategory, + SerializedAttributeValue, + ) from sentry_sdk.integrations import Integration from sentry_sdk.scope import Scope from sentry_sdk.session import Session @@ -89,6 +102,197 @@ } +def 
_serialized_v1_attribute_to_serialized_v2_attribute( + attribute_value: "Any", +) -> "Optional[SerializedAttributeValue]": + if isinstance(attribute_value, bool): + return { + "value": attribute_value, + "type": "boolean", + } + + if isinstance(attribute_value, int): + return { + "value": attribute_value, + "type": "integer", + } + + if isinstance(attribute_value, float): + return { + "value": attribute_value, + "type": "double", + } + + if isinstance(attribute_value, str): + return { + "value": attribute_value, + "type": "string", + } + + if isinstance(attribute_value, list): + if not attribute_value: + return {"value": [], "type": "array"} + + ty = type(attribute_value[0]) + if ty in (int, str, bool, float) and all( + type(v) is ty for v in attribute_value + ): + return { + "value": attribute_value, + "type": "array", + } + + # Types returned when the serializer for V1 span attributes recurses into some container types. + if isinstance(attribute_value, (dict, list)): + return { + "value": json.dumps(attribute_value), + "type": "string", + } + + if attribute_value is None: + return { + "value": "None", + "type": "string", + } + + return None + + +def _serialized_v1_span_to_serialized_v2_span( + span: "dict[str, Any]", event: "Event" +) -> "dict[str, Any]": + # See SpanBatcher._to_transport_format() for analogous population of all entries except "attributes". 
+ res: "dict[str, Any]" = { + "status": SpanStatus.OK.value, + "is_segment": False, + } + + if "trace_id" in span: + res["trace_id"] = span["trace_id"] + + if "span_id" in span: + res["span_id"] = span["span_id"] + + if "description" in span: + res["name"] = span["description"] + + if "start_timestamp" in span: + start_timestamp = None + try: + start_timestamp = datetime_from_isoformat(span["start_timestamp"]) + except Exception: + pass + + if start_timestamp is not None: + res["start_timestamp"] = start_timestamp.timestamp() + + if "timestamp" in span: + end_timestamp = None + try: + end_timestamp = datetime_from_isoformat(span["timestamp"]) + except Exception: + pass + + if end_timestamp is not None: + res["end_timestamp"] = end_timestamp.timestamp() + + if "parent_span_id" in span: + res["parent_span_id"] = span["parent_span_id"] + + if "status" in span and span["status"] != SPANSTATUS.OK: + res["status"] = "error" + + attributes: "Dict[str, Any]" = {} + + if "op" in span: + attributes["sentry.op"] = span["op"] + if "origin" in span: + attributes["sentry.origin"] = span["origin"] + + span_data = span.get("data") + if isinstance(span_data, dict): + attributes.update(span_data) + + span_tags = span.get("tags") + if isinstance(span_tags, dict): + attributes.update(span_tags) + + # See Scope._apply_user_attributes_to_telemetry() for user attributes. + user = event.get("user") + if isinstance(user, dict): + if "id" in user: + attributes["user.id"] = user["id"] + if "username" in user: + attributes["user.name"] = user["username"] + if "email" in user: + attributes["user.email"] = user["email"] + + # See Scope.set_global_attributes() for release, environment, and SDK metadata. 
+ if "release" in event: + attributes["sentry.release"] = event["release"] + if "environment" in event: + attributes["sentry.environment"] = event["environment"] + if "transaction" in event: + attributes["sentry.segment.name"] = event["transaction"] + + trace_context = event.get("contexts", {}).get("trace", {}) + if "span_id" in trace_context: + attributes["sentry.segment.id"] = trace_context["span_id"] + + sdk_info = event.get("sdk") + if isinstance(sdk_info, dict): + if "name" in sdk_info: + attributes["sentry.sdk.name"] = sdk_info["name"] + if "version" in sdk_info: + attributes["sentry.sdk.version"] = sdk_info["version"] + + if not attributes: + return res + + res["attributes"] = {} + for key, value in attributes.items(): + converted_value = _serialized_v1_attribute_to_serialized_v2_attribute(value) + if converted_value is None: + continue + + res["attributes"][key] = converted_value + + # Remove redundant attribute, as status is stored in the status field. + if "status" in res["attributes"]: + del res["attributes"]["status"] + + return res + + +def _split_gen_ai_spans( + event_opt: "Event", +) -> "Optional[tuple[List[Dict[str, object]], List[Dict[str, object]]]]": + if "spans" not in event_opt: + return None + + spans: "Any" = event_opt["spans"] + if isinstance(spans, AnnotatedValue): + spans = spans.value + + if not isinstance(spans, Iterable): + return None + + non_gen_ai_spans = [] + gen_ai_spans = [] + for span in spans: + if not isinstance(span, dict): + non_gen_ai_spans.append(span) + continue + + span_op = span.get("op") + if isinstance(span_op, str) and span_op.startswith("gen_ai."): + gen_ai_spans.append(span) + else: + non_gen_ai_spans.append(span) + + return non_gen_ai_spans, gen_ai_spans + + def _get_options(*args: "Optional[str]", **kwargs: "Any") -> "Dict[str, Any]": if args and (isinstance(args[0], (bytes, str)) or args[0] is None): dsn: "Optional[str]" = args[0] @@ -909,10 +1113,42 @@ def capture_event( envelope = Envelope(headers=headers) - if 
is_transaction: - if isinstance(profile, Profile): - envelope.add_profile(profile.to_json(event_opt, self.options)) + if is_transaction and isinstance(profile, Profile): + envelope.add_profile(profile.to_json(event_opt, self.options)) + + span_recorder_has_gen_ai_span = event_opt.pop("_has_gen_ai_span", False) + + if is_transaction and not span_recorder_has_gen_ai_span: envelope.add_transaction(event_opt) + elif is_transaction: + split_spans = _split_gen_ai_spans(event_opt) + if split_spans is None or not split_spans[1]: + envelope.add_transaction(event_opt) + else: + non_gen_ai_spans, gen_ai_spans = split_spans + + event_opt["spans"] = non_gen_ai_spans + envelope.add_transaction(event_opt) + + converted_gen_ai_spans = [ + _serialized_v1_span_to_serialized_v2_span(span, event_opt) + for span in gen_ai_spans + if isinstance(span, dict) + ] + + envelope.add_item( + Item( + type=SpanBatcher.TYPE, + content_type=SpanBatcher.CONTENT_TYPE, + headers={ + "item_count": len(converted_gen_ai_spans), + }, + payload=PayloadRef( + json={"items": converted_gen_ai_spans}, + ), + ) + ) + elif is_checkin: envelope.add_checkin(event_opt) else: diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py index 7f2baba0c9..6c8cbb87e4 100644 --- a/sentry_sdk/tracing.py +++ b/sentry_sdk/tracing.py @@ -1042,11 +1042,16 @@ def finish( return None - finished_spans = [ - span.to_json() - for span in self._span_recorder.spans - if span.timestamp is not None - ] + finished_spans = [] + has_gen_ai_span = False + for span in self._span_recorder.spans: + if span.timestamp is None: + continue + + if isinstance(span.op, str) and span.op.startswith("gen_ai."): + has_gen_ai_span = True + + finished_spans.append(span.to_json()) len_diff = len(self._span_recorder.spans) - len(finished_spans) dropped_spans = len_diff + self._span_recorder.dropped_spans @@ -1078,6 +1083,9 @@ def finish( if dropped_spans > 0: event["_dropped_spans"] = dropped_spans + if has_gen_ai_span: + event["_has_gen_ai_span"] = True + if 
self._profile is not None and self._profile.valid(): event["profile"] = self._profile self._profile = None diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index e86f7e1fa9..865013f0b4 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -91,14 +91,14 @@ async def __call__(self, *args, **kwargs): ], ) def test_nonstreaming_create_message( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -120,37 +120,38 @@ def test_nonstreaming_create_message( assert usage.input_tokens == 10 assert usage.output_tokens == 20 - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if 
send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] @pytest.mark.asyncio @@ -164,14 +165,14 @@ def test_nonstreaming_create_message( ], ) async def test_nonstreaming_create_message_async( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[AnthropicIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") 
client = AsyncAnthropic(api_key="z") client.messages._post = AsyncMock(return_value=EXAMPLE_MESSAGE) @@ -193,36 +194,37 @@ async def test_nonstreaming_create_message_async( assert usage.input_tokens == 10 assert usage.output_tokens == 20 - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.parametrize( @@ -236,7 +238,7 @@ async def test_nonstreaming_create_message_async( ) def test_streaming_create_message( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -286,7 +288,7 @@ def test_streaming_create_message( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -308,42 +310,45 @@ def test_streaming_create_message( for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT 
- assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) + assert 
span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] def test_streaming_create_message_close( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -391,7 +396,7 @@ def test_streaming_create_message_close( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -415,31 +420,34 @@ def test_streaming_create_message_close( messages.close() - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" 
- assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -448,7 +456,7 @@ def test_streaming_create_message_close( ) def test_streaming_create_message_api_error( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -491,7 +499,7 @@ def test_streaming_create_message_api_error( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -513,34 +521,36 @@ def test_streaming_create_message_api_error( for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert 
span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" assert event["contexts"]["trace"]["status"] == "internal_error" @@ -555,7 +565,7 @@ def test_streaming_create_message_api_error( ) def test_stream_messages( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -605,7 +615,7 @@ def test_stream_messages( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -628,42 +638,45 @@ def test_stream_messages( for event in stream: pass - assert 
len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] def test_stream_messages_close( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -711,7 +724,7 @@ def test_stream_messages_close( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -740,31 +753,34 @@ def test_stream_messages_close( stream.close() - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - 
assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -773,7 +789,7 @@ def test_stream_messages_close( ) def test_stream_messages_api_error( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -816,7 +832,7 @@ def test_stream_messages_api_error( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ 
{ @@ -839,34 +855,36 @@ def test_stream_messages_api_error( for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" 
- assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" assert event["contexts"]["trace"]["status"] == "internal_error" @@ -882,7 +900,7 @@ def test_stream_messages_api_error( ) async def test_streaming_create_message_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -936,7 +954,7 @@ async def test_streaming_create_message_async( default_integrations=False, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -958,44 +976,45 @@ async def test_streaming_create_message_async( async for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert 
span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["max_tokens"] @pytest.mark.asyncio async def 
test_streaming_create_message_async_close( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -1046,7 +1065,7 @@ async def test_streaming_create_message_async_close( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1069,31 +1088,34 @@ async def test_streaming_create_message_async_close( await messages.__anext__() await messages.close() - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" 
- assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -1103,7 +1125,7 @@ async def test_streaming_create_message_async_close( @pytest.mark.asyncio async def test_streaming_create_message_async_api_error( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -1149,7 +1171,7 @@ async def test_streaming_create_message_async_api_error( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1171,34 +1193,36 @@ async def test_streaming_create_message_async_api_error( async for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - 
assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" assert event["contexts"]["trace"]["status"] == "internal_error" @@ -1214,7 +1238,7 @@ async def test_streaming_create_message_async_api_error( ) async def test_stream_message_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1267,7 +1291,7 @@ async def test_stream_message_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", 
"span") messages = [ { @@ -1290,37 +1314,38 @@ async def test_stream_message_async( async for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -1330,7 +1355,7 @@ async def test_stream_message_async( @pytest.mark.asyncio async def test_stream_messages_async_api_error( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -1376,7 +1401,7 @@ async def test_stream_messages_async_api_error( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1399,41 +1424,43 @@ async def test_stream_messages_async_api_error( async for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == 
OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" assert event["contexts"]["trace"]["status"] == "internal_error" @pytest.mark.asyncio async def test_stream_messages_async_close( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ 
-1484,7 +1511,7 @@ async def test_stream_messages_async_close( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1515,31 +1542,34 @@ async def test_stream_messages_async_close( await stream.close() - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - span = next(span for span in event["spans"] if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi!" 
- assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - assert span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "msg_01XFDUDYJgAACzvnptvVoYEL" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert ( + span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] + == "msg_01XFDUDYJgAACzvnptvVoYEL" + ) @pytest.mark.skipif( @@ -1557,7 +1587,7 @@ async def test_stream_messages_async_close( ) def test_streaming_create_message_with_input_json_delta( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1637,7 +1667,7 @@ def test_streaming_create_message_with_input_json_delta( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1659,38 +1689,36 @@ def test_streaming_create_message_with_input_json_delta( for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert 
span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == '{"location": "San Francisco, CA"}' ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.skipif( @@ -1708,7 +1736,7 @@ def test_streaming_create_message_with_input_json_delta( ) def test_stream_messages_with_input_json_delta( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1788,7 +1816,7 @@ def test_stream_messages_with_input_json_delta( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1811,38 +1839,36 @@ def 
test_stream_messages_with_input_json_delta( for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == '{"location": "San Francisco, CA"}' ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 
+ assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -1861,7 +1887,7 @@ def test_stream_messages_with_input_json_delta( ) async def test_streaming_create_message_with_input_json_delta_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1947,7 +1973,7 @@ async def test_streaming_create_message_with_input_json_delta_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1969,39 +1995,37 @@ async def test_streaming_create_message_with_input_json_delta_async( async for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the 
weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == '{"location": "San Francisco, CA"}' ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -2020,7 +2044,7 @@ async def test_streaming_create_message_with_input_json_delta_async( ) async def test_stream_message_with_input_json_delta_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -2106,7 +2130,7 @@ async def test_stream_message_with_input_json_delta_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -2129,44 +2153,45 @@ async def test_stream_message_with_input_json_delta_async( async for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert 
span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == '{"location": "San Francisco, CA"}' ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 41 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 407 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True -def test_exception_message_create(sentry_init, capture_events): - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() +def test_exception_message_create(sentry_init, capture_items): + 
sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("event", "transaction") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -2179,14 +2204,19 @@ def test_exception_message_create(sentry_init, capture_events): max_tokens=1024, ) - (event, transaction) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" -def test_span_status_error(sentry_init, capture_events): - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() +def test_span_status_error(sentry_init, capture_items): + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("event", "span") with start_transaction(name="anthropic"): client = Anthropic(api_key="z") @@ -2200,18 +2230,22 @@ def test_span_status_error(sentry_init, capture_events): max_tokens=1024, ) - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" - assert transaction["spans"][0]["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert transaction["spans"][0]["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + assert spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" @pytest.mark.asyncio -async def test_span_status_error_async(sentry_init, capture_events): - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() 
+async def test_span_status_error_async(sentry_init, capture_items): + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("event", "span") with start_transaction(name="anthropic"): client = AsyncAnthropic(api_key="z") @@ -2225,18 +2259,22 @@ async def test_span_status_error_async(sentry_init, capture_events): max_tokens=1024, ) - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" - assert transaction["spans"][0]["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert transaction["spans"][0]["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + assert spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" @pytest.mark.asyncio -async def test_exception_message_create_async(sentry_init, capture_events): - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() +async def test_exception_message_create_async(sentry_init, capture_items): + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("event", "transaction") client = AsyncAnthropic(api_key="z") client.messages._post = AsyncMock( @@ -2249,17 +2287,19 @@ async def test_exception_message_create_async(sentry_init, capture_events): max_tokens=1024, ) - (event, transaction) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" -def test_span_origin(sentry_init, 
capture_events): +def test_span_origin(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -2274,21 +2314,22 @@ def test_span_origin(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.anthropic" - assert event["spans"][0]["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert event["spans"][0]["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" @pytest.mark.asyncio -async def test_span_origin_async(sentry_init, capture_events): +async def test_span_origin_async(sentry_init, capture_items): sentry_init( integrations=[AnthropicIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") client.messages._post = AsyncMock(return_value=EXAMPLE_MESSAGE) @@ -2303,12 +2344,13 @@ async def test_span_origin_async(sentry_init, capture_events): with start_transaction(name="anthropic"): await client.messages.create(max_tokens=1024, messages=messages, model="model") - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.anthropic" - assert 
event["spans"][0]["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert event["spans"][0]["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert spans[0]["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" @pytest.mark.skipif( @@ -2392,7 +2434,7 @@ def test_set_output_data_with_input_json_delta(sentry_init): ], ) def test_anthropic_message_role_mapping( - sentry_init, capture_events, test_message, expected_role + sentry_init, capture_items, test_message, expected_role ): """Test that Anthropic integration properly maps message roles like 'ai' to 'assistant'""" sentry_init( @@ -2400,7 +2442,7 @@ def test_anthropic_message_role_mapping( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") @@ -2425,29 +2467,28 @@ def mock_messages_create(*args, **kwargs): model="claude-3-opus", max_tokens=10, messages=test_messages ) - (event,) = events - span = event["spans"][0] + span = next(item.payload for item in items if item.type == "span") # Verify that the span was created correctly - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] # Parse the stored messages - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert 
stored_messages[0]["role"] == expected_role -def test_anthropic_message_truncation(sentry_init, capture_events): +def test_anthropic_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in Anthropic integration.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -2466,21 +2507,18 @@ def test_anthropic_message_truncation(sentry_init, capture_events): with start_transaction(): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] chat_spans = [ - span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_CHAT + span for span in spans if span["attributes"].get("sentry.op") == OP.GEN_AI_CHAT ] assert len(chat_spans) > 0 chat_span = chat_spans[0] - assert chat_span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert chat_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["data"] + assert chat_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert chat_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["attributes"] - messages_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) @@ -2488,18 +2526,19 @@ def test_anthropic_message_truncation(sentry_init, capture_events): assert len(parsed_messages) == 1 assert "small message 5" in str(parsed_messages[0]) + tx = next(item.payload for item in items if item.type == 
"transaction") assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5 @pytest.mark.asyncio -async def test_anthropic_message_truncation_async(sentry_init, capture_events): +async def test_anthropic_message_truncation_async(sentry_init, capture_items): """Test that large messages are truncated properly in Anthropic integration.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") client.messages._post = mock.AsyncMock(return_value=EXAMPLE_MESSAGE) @@ -2518,21 +2557,18 @@ async def test_anthropic_message_truncation_async(sentry_init, capture_events): with start_transaction(): await client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] chat_spans = [ - span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_CHAT + span for span in spans if span["attributes"].get("sentry.op") == OP.GEN_AI_CHAT ] assert len(chat_spans) > 0 chat_span = chat_spans[0] - assert chat_span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert chat_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["data"] + assert chat_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert chat_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["attributes"] - messages_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) @@ -2540,6 +2576,7 @@ async def test_anthropic_message_truncation_async(sentry_init, capture_events): assert 
len(parsed_messages) == 1 assert "small message 5" in str(parsed_messages[0]) + tx = next(item.payload for item in items if item.type == "transaction") assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5 @@ -2553,7 +2590,7 @@ async def test_anthropic_message_truncation_async(sentry_init, capture_events): ], ) def test_nonstreaming_create_message_with_system_prompt( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that system prompts are properly captured in GEN_AI_REQUEST_MESSAGES.""" sentry_init( @@ -2561,7 +2598,7 @@ def test_nonstreaming_create_message_with_system_prompt( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -2586,46 +2623,46 @@ def test_nonstreaming_create_message_with_system_prompt( assert usage.input_tokens == 10 assert usage.output_tokens == 20 - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert 
span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] @pytest.mark.asyncio @@ -2639,7 +2676,7 @@ def test_nonstreaming_create_message_with_system_prompt( ], ) async def test_nonstreaming_create_message_with_system_prompt_async( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that system prompts are properly captured in GEN_AI_REQUEST_MESSAGES (async).""" sentry_init( @@ -2647,7 +2684,7 @@ async def test_nonstreaming_create_message_with_system_prompt_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncAnthropic(api_key="z") client.messages._post = AsyncMock(return_value=EXAMPLE_MESSAGE) @@ -2672,46 +2709,46 @@ async def test_nonstreaming_create_message_with_system_prompt_async( assert usage.input_tokens == 10 assert usage.output_tokens == 20 - 
assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["end_turn"] @pytest.mark.parametrize( @@ -2725,7 +2762,7 @@ async def test_nonstreaming_create_message_with_system_prompt_async( ) def test_streaming_create_message_with_system_prompt( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -2776,7 +2813,7 @@ def test_streaming_create_message_with_system_prompt( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -2802,46 +2839,46 @@ def test_streaming_create_message_with_system_prompt( for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload 
for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.parametrize( @@ -2855,7 +2892,7 @@ def test_streaming_create_message_with_system_prompt( ) def test_stream_messages_with_system_prompt( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -2906,7 +2943,7 @@ def test_stream_messages_with_system_prompt( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -2930,46 +2967,46 @@ def test_stream_messages_with_system_prompt( for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert 
span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -2984,7 +3021,7 @@ def test_stream_messages_with_system_prompt( ) async def test_stream_message_with_system_prompt_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -3038,7 +3075,7 @@ async def test_stream_message_with_system_prompt_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -3062,46 +3099,46 @@ async def test_stream_message_with_system_prompt_async( async for event in stream: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat model" - assert 
span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -3116,7 +3153,7 @@ async def test_stream_message_with_system_prompt_async( ) async def test_streaming_create_message_with_system_prompt_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -3170,7 +3207,7 @@ async def test_streaming_create_message_with_system_prompt_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -3196,56 +3233,56 @@ async def test_streaming_create_message_with_system_prompt_async( async for _ in message: pass - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "anthropic" - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["op"] == OP.GEN_AI_CHAT - assert 
span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat model" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] system_instructions = json.loads( - span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ {"type": "text", "content": "You are a helpful assistant."} ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" assert stored_messages[0]["content"] == "Hello, Claude" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True -def test_system_prompt_with_complex_structure(sentry_init, capture_events): +def test_system_prompt_with_complex_structure(sentry_init, capture_items): """Test that complex system prompt structures (list of text blocks) are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3268,17 +3305,18 @@ def test_system_prompt_with_complex_structure(sentry_init, capture_events): ) assert response == EXAMPLE_MESSAGE - assert len(events) == 1 - (event,) = events - assert len(event["spans"]) == 1 - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (span,) = spans - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert 
span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "anthropic" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["data"] - system_instructions = json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS in span["attributes"] + system_instructions = json.loads( + span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) # System content should be a list of text blocks assert isinstance(system_instructions, list) @@ -3287,8 +3325,8 @@ def test_system_prompt_with_complex_structure(sentry_init, capture_events): {"type": "text", "content": "Be concise and clear."}, ] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" @@ -3490,14 +3528,14 @@ def test_transform_message_content_list_anthropic(): # Integration tests for binary data in messages -def test_message_with_base64_image(sentry_init, capture_events): +def test_message_with_base64_image(sentry_init, capture_items): """Test that messages with base64 images are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3521,12 +3559,11 @@ def test_message_with_base64_image(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type 
== "span"] + (span,) = spans - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == "user" @@ -3541,14 +3578,14 @@ def test_message_with_base64_image(sentry_init, capture_events): } -def test_message_with_url_image(sentry_init, capture_events): +def test_message_with_url_image(sentry_init, capture_items): """Test that messages with URL-referenced images are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3571,11 +3608,10 @@ def test_message_with_url_image(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert content[1] == { "type": "uri", @@ -3585,14 +3621,14 @@ def test_message_with_url_image(sentry_init, capture_events): } -def test_message_with_file_image(sentry_init, capture_events): +def test_message_with_file_image(sentry_init, capture_items): """Test that messages with file_id-referenced images are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events 
= capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3616,11 +3652,10 @@ def test_message_with_file_image(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert content[1] == { "type": "file", @@ -3630,14 +3665,14 @@ def test_message_with_file_image(sentry_init, capture_events): } -def test_message_with_base64_pdf(sentry_init, capture_events): +def test_message_with_base64_pdf(sentry_init, capture_items): """Test that messages with base64-encoded PDF documents are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3661,11 +3696,10 @@ def test_message_with_base64_pdf(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert content[1] == { "type": "blob", @@ -3675,14 +3709,14 @@ def 
test_message_with_base64_pdf(sentry_init, capture_events): } -def test_message_with_url_pdf(sentry_init, capture_events): +def test_message_with_url_pdf(sentry_init, capture_items): """Test that messages with URL-referenced PDF documents are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3705,11 +3739,10 @@ def test_message_with_url_pdf(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert content[1] == { "type": "uri", @@ -3719,14 +3752,14 @@ def test_message_with_url_pdf(sentry_init, capture_events): } -def test_message_with_file_document(sentry_init, capture_events): +def test_message_with_file_document(sentry_init, capture_items): """Test that messages with file_id-referenced documents are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3750,11 +3783,10 @@ def test_message_with_file_document(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events 
- (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert content[1] == { "type": "file", @@ -3764,14 +3796,14 @@ def test_message_with_file_document(sentry_init, capture_events): } -def test_message_with_mixed_content(sentry_init, capture_events): +def test_message_with_mixed_content(sentry_init, capture_items): """Test that messages with mixed content (text, images, documents) are properly captured.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3811,11 +3843,10 @@ def test_message_with_mixed_content(sentry_init, capture_events): with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert len(content) == 5 @@ -3847,14 +3878,14 @@ def test_message_with_mixed_content(sentry_init, capture_events): } -def test_message_with_multiple_images_different_formats(sentry_init, capture_events): +def test_message_with_multiple_images_different_formats(sentry_init, capture_items): """Test that messages with multiple images of different source types are handled.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], 
traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3893,11 +3924,10 @@ def test_message_with_multiple_images_different_formats(sentry_init, capture_eve with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content = stored_messages[0]["content"] assert len(content) == 4 @@ -3922,14 +3952,14 @@ def test_message_with_multiple_images_different_formats(sentry_init, capture_eve assert content[3] == {"type": "text", "text": "Compare these three images."} -def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_events): +def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_items): """Test that binary content is not stored when send_default_pii is False.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3953,22 +3983,21 @@ def test_binary_content_not_stored_when_pii_disabled(sentry_init, capture_events with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans # Messages should not be stored - assert SPANDATA.GEN_AI_REQUEST_MESSAGES 
not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] -def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_events): +def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_items): """Test that binary content is not stored when include_prompts is False.""" sentry_init( integrations=[AnthropicIntegration(include_prompts=False)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock(return_value=EXAMPLE_MESSAGE) @@ -3992,18 +4021,20 @@ def test_binary_content_not_stored_when_prompts_disabled(sentry_init, capture_ev with start_transaction(name="anthropic"): client.messages.create(max_tokens=1024, messages=messages, model="model") - assert len(events) == 1 - (event,) = events - (span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (span,) = spans # Messages should not be stored - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] -def test_cache_tokens_nonstreaming(sentry_init, capture_events): +def test_cache_tokens_nonstreaming(sentry_init, capture_items): """Test cache read/write tokens are tracked for non-streaming responses.""" - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -4029,16 +4060,16 @@ def test_cache_tokens_nonstreaming(sentry_init, capture_events): model="claude-3-5-sonnet-20241022", ) - (span,) = events[0]["spans"] + (span,) = (item.payload for item in items if item.type == "span") # input_tokens normalized: 100 + 80 (cache_read) + 20 (cache_write) = 200 - assert 
span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 50 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 250 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 50 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 250 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 -def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_events): +def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_items): """ Test that gen_ai.usage.input_tokens includes cache_write tokens (non-streaming). @@ -4050,8 +4081,11 @@ def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_even Usage(input_tokens=19, output_tokens=14, cache_creation_input_tokens=2846, cache_read_input_tokens=0) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -4077,16 +4111,16 @@ def test_input_tokens_include_cache_write_nonstreaming(sentry_init, capture_even model="claude-sonnet-4-20250514", ) - (span,) = events[0]["spans"] + (span,) = (item.payload for item in items if item.type == "span") # input_tokens should be total: 19 (non-cached) + 2846 (cache_write) = 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 - assert 
span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 0 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 2846 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 0 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 2846 -def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_events): +def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_items): """ Test that gen_ai.usage.input_tokens includes cache_read tokens (non-streaming). @@ -4098,8 +4132,11 @@ def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_event Usage(input_tokens=19, output_tokens=14, cache_creation_input_tokens=0, cache_read_input_tokens=2846) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -4125,18 +4162,18 @@ def test_input_tokens_include_cache_read_nonstreaming(sentry_init, capture_event model="claude-sonnet-4-20250514", ) - (span,) = events[0]["spans"] + (span,) = [item.payload for item in items if item.type == "span"] # input_tokens should be total: 19 (non-cached) + 2846 (cache_read) = 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 + assert 
span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 def test_input_tokens_include_cache_read_streaming( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -4175,8 +4212,11 @@ def test_input_tokens_include_cache_read_streaming( ) ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("transaction", "span") with mock.patch.object( client._client, @@ -4192,18 +4232,18 @@ def test_input_tokens_include_cache_read_streaming( ): pass - (span,) = events[0]["spans"] + (span,) = (item.payload for item in items if item.type == "span") # input_tokens should be total: 19 + 2846 = test_stream_messages_input_tokens_include_cache_read_streaming - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 def test_stream_messages_input_tokens_include_cache_read_streaming( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -4241,8 +4281,11 @@ def test_stream_messages_input_tokens_include_cache_read_streaming( ) ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + sentry_init( + integrations=[AnthropicIntegration()], + 
traces_sample_rate=1.0, + ) + items = capture_items("transaction", "span") with mock.patch.object( client._client, @@ -4258,24 +4301,27 @@ def test_stream_messages_input_tokens_include_cache_read_streaming( for event in stream: pass - (span,) = events[0]["spans"] + (span,) = (item.payload for item in items if item.type == "span") # input_tokens should be total: 19 + 2846 = 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 2865 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 2879 # 2865 + 14 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 2846 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 0 -def test_input_tokens_unchanged_without_caching(sentry_init, capture_events): +def test_input_tokens_unchanged_without_caching(sentry_init, capture_items): """ Test that input_tokens is unchanged when there are no cached tokens. 
Real Anthropic response (from E2E test, simple call without caching): Usage(input_tokens=20, output_tokens=12) """ - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("transaction", "span") client = Anthropic(api_key="z") client.messages._post = mock.Mock( @@ -4299,15 +4345,15 @@ def test_input_tokens_unchanged_without_caching(sentry_init, capture_events): model="claude-sonnet-4-20250514", ) - (span,) = events[0]["spans"] + (span,) = (item.payload for item in items if item.type == "span") - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 32 # 20 + 12 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 32 # 20 + 12 def test_cache_tokens_streaming( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -4342,8 +4388,11 @@ def test_cache_tokens_streaming( ) ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("transaction", "span") with mock.patch.object( client._client, @@ -4359,17 +4408,17 @@ def test_cache_tokens_streaming( ): pass - (span,) = events[0]["spans"] + (span,) = (item.payload for item in items if item.type == "span") # input_tokens normalized: 100 + 80 (cache_read) + 20 (cache_write) = 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 210 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 + 
assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 210 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 def test_stream_messages_cache_tokens( - sentry_init, capture_events, get_model_response, server_side_event_chunks + sentry_init, capture_items, get_model_response, server_side_event_chunks ): """Test cache tokens are tracked for streaming responses.""" client = Anthropic(api_key="z") @@ -4402,8 +4451,11 @@ def test_stream_messages_cache_tokens( ) ) - sentry_init(integrations=[AnthropicIntegration()], traces_sample_rate=1.0) - events = capture_events() + sentry_init( + integrations=[AnthropicIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("transaction", "span") with mock.patch.object( client._client, @@ -4419,10 +4471,10 @@ def test_stream_messages_cache_tokens( for event in stream: pass - (span,) = events[0]["spans"] + (span,) = (item.payload for item in items if item.type == "span") # input_tokens normalized: 100 + 80 (cache_read) + 20 (cache_write) = 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 210 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 200 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 210 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 diff --git 
a/tests/integrations/google_genai/test_google_genai.py b/tests/integrations/google_genai/test_google_genai.py index 6e91ba6634..3974041314 100644 --- a/tests/integrations/google_genai/test_google_genai.py +++ b/tests/integrations/google_genai/test_google_genai.py @@ -124,14 +124,14 @@ def create_test_config( ], ) def test_nonstreaming_generate_content( - sentry_init, capture_events, send_default_pii, include_prompts, mock_genai_client + sentry_init, capture_items, send_default_pii, include_prompts, mock_genai_client ): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the HTTP response at the _api_client.request() level mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -146,38 +146,37 @@ def test_nonstreaming_generate_content( mock_genai_client.models.generate_content( model="gemini-1.5-flash", contents="Tell me a joke", config=config ) - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "google_genai" - assert len(event["spans"]) == 1 - chat_span = event["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + chat_span = next(item.payload for item in items if item.type == "span") # Check chat span - assert chat_span["op"] == OP.GEN_AI_CHAT - assert chat_span["description"] == "chat gemini-1.5-flash" - assert chat_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" - assert chat_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" - assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + assert chat_span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert chat_span["name"] == "chat gemini-1.5-flash" + assert 
chat_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert chat_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" if send_default_pii and include_prompts: # Response text is stored as a JSON array - response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] # Parse the JSON array response_texts = json.loads(response_text) assert response_texts == ["Hello! How can I help you today?"] else: - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_span["attributes"] # Check token usage - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 # Output tokens now include reasoning tokens: candidates_token_count (20) + thoughts_token_count (3) = 23 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 23 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 23 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 @pytest.mark.parametrize("generate_content_config", (False, True)) @@ -210,7 +209,7 @@ def test_nonstreaming_generate_content( ) def test_generate_content_with_system_instruction( sentry_init, - capture_events, + capture_items, mock_genai_client, generate_content_config, system_instructions, @@ -221,7 +220,7 @@ def test_generate_content_with_system_instruction( traces_sample_rate=1.0, 
send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -243,16 +242,15 @@ def test_generate_content_with_system_instruction( config=config, ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") if expected_texts is None: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_span["attributes"] return # (PII is enabled and include_prompts is True in this test) system_instructions = json.loads( - invoke_span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + invoke_span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] ) assert system_instructions == [ @@ -260,12 +258,12 @@ def test_generate_content_with_system_instruction( ] -def test_generate_content_with_tools(sentry_init, capture_events, mock_genai_client): +def test_generate_content_with_tools(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Create a mock tool function def get_weather(location: str) -> str: @@ -319,18 +317,17 @@ def get_weather(location: str) -> str: model="gemini-1.5-flash", contents="What's the weather?", config=config ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") # Check that tools are recorded (data is serialized as a string) - tools_data_str = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + tools_data_str = invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] # Parse the JSON string to verify content tools_data = json.loads(tools_data_str) assert len(tools_data) == 2 # The order of tools may not be guaranteed, so sort by name and description for comparison sorted_tools = sorted( - 
tools_data, key=lambda t: (t.get("name", ""), t.get("description", "")) + tools_data, key=lambda t: (t.get("name", ""), t.get("description", "")) ) # The function tool @@ -342,13 +339,13 @@ def get_weather(location: str) -> str: assert sorted_tools[1]["description"] == "Get weather information (tool object)" -def test_tool_execution(sentry_init, capture_events): +def test_tool_execution(sentry_init, capture_items): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Create a mock tool function def get_weather(location: str) -> str: @@ -366,25 +363,25 @@ def get_weather(location: str) -> str: assert result == "The weather in San Francisco is sunny" - (event,) = events - assert len(event["spans"]) == 1 - tool_span = event["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + tool_span = next(item.payload for item in items if item.type == "span") - assert tool_span["op"] == OP.GEN_AI_EXECUTE_TOOL - assert tool_span["description"] == "execute_tool get_weather" - assert tool_span["data"][SPANDATA.GEN_AI_TOOL_NAME] == "get_weather" + assert tool_span["attributes"]["sentry.op"] == OP.GEN_AI_EXECUTE_TOOL + assert tool_span["name"] == "execute_tool get_weather" + assert tool_span["attributes"][SPANDATA.GEN_AI_TOOL_NAME] == "get_weather" assert ( - tool_span["data"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] + tool_span["attributes"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] == "Get the weather for a location" ) -def test_error_handling(sentry_init, capture_events, mock_genai_client): +def test_error_handling(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "transaction") # Mock an error at the HTTP level with mock.patch.object( @@ -398,9 +395,7 @@ def 
test_error_handling(sentry_init, capture_events, mock_genai_client): config=create_test_config(), ) - # Should have both transaction and error events - assert len(events) == 2 - error_event, transaction_event = events + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "Exception" @@ -408,14 +403,14 @@ def test_error_handling(sentry_init, capture_events, mock_genai_client): assert error_event["exception"]["values"][0]["mechanism"]["type"] == "google_genai" -def test_streaming_generate_content(sentry_init, capture_events, mock_genai_client): +def test_streaming_generate_content(sentry_init, capture_items, mock_genai_client): """Test streaming with generate_content_stream, verifying chunk accumulation.""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Create streaming chunks - simulating a multi-chunk response # Chunk 1: First part of text with partial usage metadata @@ -497,40 +492,41 @@ def test_streaming_generate_content(sentry_init, capture_events, mock_genai_clie assert collected_chunks[1].candidates[0].content.parts[0].text == "How can I " assert collected_chunks[2].candidates[0].content.parts[0].text == "help you today?" - (event,) = events - - assert len(event["spans"]) == 1 - chat_span = event["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + chat_span = next(item.payload for item in items if item.type == "span") # Check that streaming flag is set on both spans - assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True # Verify accumulated response text (all chunks combined) expected_full_text = "Hello! How can I help you today?" 
# Response text is stored as a JSON string - chat_response_text = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]) + chat_response_text = json.loads( + chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] + ) assert chat_response_text == [expected_full_text] # Verify finish reasons (only the final chunk has a finish reason) # When there's a single finish reason, it's stored as a plain string (not JSON) - assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in chat_span["data"] - assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP" - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 25 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 - assert chat_span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 + assert SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS in chat_span["attributes"] + assert chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == "STOP" + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 10 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 25 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 5 + assert chat_span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING] == 3 # Verify model name - assert chat_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" + assert chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gemini-1.5-flash" -def test_span_origin(sentry_init, capture_events, mock_genai_client): +def test_span_origin(sentry_init, capture_items, mock_genai_client): sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") mock_http_response = 
create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -543,22 +539,21 @@ def test_span_origin(sentry_init, capture_events, mock_genai_client): model="gemini-1.5-flash", contents="Test origin", config=config ) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - for span in event["spans"]: - assert span["origin"] == "auto.ai.google_genai" + spans = [item.payload for item in items if item.type == "span"] + for span in spans: + assert span["attributes"]["sentry.origin"] == "auto.ai.google_genai" -def test_response_without_usage_metadata( - sentry_init, capture_events, mock_genai_client -): + +def test_response_without_usage_metadata(sentry_init, capture_items, mock_genai_client): """Test handling of responses without usage metadata""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response without usage metadata response_json = { @@ -584,23 +579,22 @@ def test_response_without_usage_metadata( model="gemini-1.5-flash", contents="Test", config=config ) - (event,) = events - chat_span = event["spans"][0] + chat_span = next(item.payload for item in items if item.type == "span") # Usage data should not be present - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in chat_span["data"] - assert SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS not in chat_span["data"] - assert SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS not in chat_span["data"] + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in chat_span["attributes"] + assert SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS not in chat_span["attributes"] + assert SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS not in chat_span["attributes"] -def test_multiple_candidates(sentry_init, capture_events, mock_genai_client): +def test_multiple_candidates(sentry_init, capture_items, mock_genai_client): """Test handling of multiple response candidates""" sentry_init( 
integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Response with multiple candidates multi_candidate_json = { @@ -638,12 +632,11 @@ def test_multiple_candidates(sentry_init, capture_events, mock_genai_client): model="gemini-1.5-flash", contents="Generate multiple", config=config ) - (event,) = events - chat_span = event["spans"][0] + chat_span = next(item.payload for item in items if item.type == "span") # Should capture all responses # Response text is stored as a JSON string when there are multiple responses - response_text = chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] if isinstance(response_text, str) and response_text.startswith("["): # It's a JSON array response_list = json.loads(response_text) @@ -654,18 +647,18 @@ def test_multiple_candidates(sentry_init, capture_events, mock_genai_client): # Finish reasons are serialized as JSON finish_reasons = json.loads( - chat_span["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] + chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] ) assert finish_reasons == ["STOP", "MAX_TOKENS"] -def test_all_configuration_parameters(sentry_init, capture_events, mock_genai_client): +def test_all_configuration_parameters(sentry_init, capture_items, mock_genai_client): """Test that all configuration parameters are properly recorded""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -686,26 +679,25 @@ def test_all_configuration_parameters(sentry_init, capture_events, mock_genai_cl model="gemini-1.5-flash", contents="Test all params", config=config ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if 
item.type == "span") # Check all parameters are recorded - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.8 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.95 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_TOP_K] == 40 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 2048 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert invoke_span["data"][SPANDATA.GEN_AI_REQUEST_SEED] == 12345 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.8 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.95 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_K] == 40 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 2048 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_SEED] == 12345 -def test_empty_response(sentry_init, capture_events, mock_genai_client): +def test_empty_response(sentry_init, capture_items, mock_genai_client): """Test handling of minimal response with no content""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Minimal response with empty candidates array minimal_response_json = {"candidates": []} @@ -723,20 +715,20 @@ def test_empty_response(sentry_init, capture_events, mock_genai_client): assert response is not None assert len(response.candidates) == 0 - (event,) = events # Should still create spans even with empty candidates - assert len(event["spans"]) == 1 + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 def test_response_with_different_id_fields( - sentry_init, capture_events, mock_genai_client + 
sentry_init, capture_items, mock_genai_client ): """Test handling of different response ID field names""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response with response_id and model_version response_json = { @@ -763,20 +755,21 @@ def test_response_with_different_id_fields( model="gemini-1.5-flash", contents="Test", config=create_test_config() ) - (event,) = events - chat_span = event["spans"][0] + chat_span = next(item.payload for item in items if item.type == "span") - assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_ID] == "resp-456" - assert chat_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gemini-1.5-flash-001" + assert chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_ID] == "resp-456" + assert ( + chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] + == "gemini-1.5-flash-001" + ) -def test_tool_with_async_function(sentry_init, capture_events): +def test_tool_with_async_function(sentry_init): """Test that async tool functions are properly wrapped""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - capture_events() # Create an async tool function async def async_tool(param: str) -> str: @@ -792,14 +785,14 @@ async def async_tool(param: str) -> str: assert hasattr(wrapped_async_tool, "__wrapped__") # Should preserve original -def test_contents_as_none(sentry_init, capture_events, mock_genai_client): +def test_contents_as_none(sentry_init, capture_items, mock_genai_client): """Test handling when contents parameter is None""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -811,22 +804,21 @@ def test_contents_as_none(sentry_init, capture_events, mock_genai_client): model="gemini-1.5-flash", contents=None, 
config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") # Should handle None contents gracefully - messages = invoke_span["data"].get(SPANDATA.GEN_AI_REQUEST_MESSAGES, []) + messages = invoke_span["attributes"].get(SPANDATA.GEN_AI_REQUEST_MESSAGES, []) # Should only have system message if any, not user message assert all(msg["role"] != "user" or msg["content"] is not None for msg in messages) -def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client): +def test_tool_calls_extraction(sentry_init, capture_items, mock_genai_client): """Test extraction of tool/function calls from response""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response with function calls function_call_response_json = { @@ -875,14 +867,17 @@ def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client): config=create_test_config(), ) - (event,) = events - chat_span = event["spans"][0] # The chat span + chat_span = next( + item.payload for item in items if item.type == "span" + ) # The chat span # Check that tool calls are extracted and stored - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_span["attributes"] # Parse the JSON string to verify content - tool_calls = json.loads(chat_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS]) + tool_calls = json.loads( + chat_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + ) assert len(tool_calls) == 2 @@ -902,16 +897,14 @@ def test_tool_calls_extraction(sentry_init, capture_events, mock_genai_client): assert json.loads(tool_calls[1]["arguments"]) == {"timezone": "PST"} -def test_google_genai_message_truncation( - sentry_init, capture_events, mock_genai_client -): +def test_google_genai_message_truncation(sentry_init, capture_items, 
mock_genai_client): """Test that large messages are truncated properly in Google GenAI integration.""" sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") large_content = ( "This is a very long message that will exceed our size limits. " * 1000 @@ -930,11 +923,10 @@ def test_google_genai_message_truncation( config=create_test_config(), ) - (event,) = events - invoke_span = event["spans"][0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] + invoke_span = next(item.payload for item in items if item.type == "span") + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["attributes"] - messages_data = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) @@ -980,14 +972,14 @@ def test_google_genai_message_truncation( ], ) def test_embed_content( - sentry_init, capture_events, send_default_pii, include_prompts, mock_genai_client + sentry_init, capture_items, send_default_pii, include_prompts, mock_genai_client ): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the HTTP response at the _api_client.request() level mock_http_response = create_mock_http_response(EXAMPLE_EMBED_RESPONSE_JSON) @@ -1006,47 +998,49 @@ def test_embed_content( ], ) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "google_genai_embeddings" # Should have 1 span for embeddings - assert len(event["spans"]) == 1 - (embed_span,) = event["spans"] + spans = [item.payload for item 
in items if item.type == "span"] + assert len(spans) == 1 + (embed_span,) = spans # Check embeddings span - assert embed_span["op"] == OP.GEN_AI_EMBEDDINGS - assert embed_span["description"] == "embeddings text-embedding-004" - assert embed_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" - assert embed_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" - assert embed_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-004" + assert embed_span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert embed_span["name"] == "embeddings text-embedding-004" + assert embed_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert embed_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert ( + embed_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-004" + ) # Check input texts if PII is allowed if send_default_pii and include_prompts: - input_texts = json.loads(embed_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) + input_texts = json.loads( + embed_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + ) assert input_texts == [ "What is your name?", "What is your favorite color?", ] else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embed_span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embed_span["attributes"] # Check usage data (sum of token counts from statistics: 10 + 15 = 25) # Note: Only available in newer versions with ContentEmbeddingStatistics - if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["data"]: - assert embed_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 25 + if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["attributes"]: + assert embed_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 25 -def test_embed_content_string_input(sentry_init, capture_events, mock_genai_client): +def test_embed_content_string_input(sentry_init, capture_items, mock_genai_client): """Test embed_content with a single string instead of list.""" sentry_init( 
integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Mock response with single embedding single_embed_response = { @@ -1074,25 +1068,25 @@ def test_embed_content_string_input(sentry_init, capture_events, mock_genai_clie contents="Single text input", ) - (event,) = events - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (embed_span,) = spans # Check that single string is handled correctly - input_texts = json.loads(embed_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) + input_texts = json.loads(embed_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) assert input_texts == ["Single text input"] # Should use token_count from statistics (5), not billable_character_count (10) # Note: Only available in newer versions with ContentEmbeddingStatistics - if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["data"]: - assert embed_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["attributes"]: + assert embed_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 -def test_embed_content_error_handling(sentry_init, capture_events, mock_genai_client): +def test_embed_content_error_handling(sentry_init, capture_items, mock_genai_client): """Test error handling in embed_content.""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "event") # Mock an error at the HTTP level with mock.patch.object( @@ -1107,9 +1101,7 @@ def test_embed_content_error_handling(sentry_init, capture_events, mock_genai_cl contents=["This will fail"], ) - # Should have both transaction and error events - assert len(events) == 2 - error_event, _ = events + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert 
error_event["exception"]["values"][0]["type"] == "Exception" @@ -1118,14 +1110,14 @@ def test_embed_content_error_handling(sentry_init, capture_events, mock_genai_cl def test_embed_content_without_statistics( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test embed_content response without statistics (older package versions).""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response without statistics (typical for older google-genai versions) # Embeddings exist but don't have the statistics field @@ -1150,21 +1142,21 @@ def test_embed_content_without_statistics( contents=["Test without statistics", "Another test"], ) - (event,) = events - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (embed_span,) = spans # No usage tokens since there are no statistics in older versions # This is expected and the integration should handle it gracefully - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in embed_span["data"] + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in embed_span["attributes"] -def test_embed_content_span_origin(sentry_init, capture_events, mock_genai_client): +def test_embed_content_span_origin(sentry_init, capture_items, mock_genai_client): """Test that embed_content spans have correct origin.""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") mock_http_response = create_mock_http_response(EXAMPLE_EMBED_RESPONSE_JSON) @@ -1177,11 +1169,12 @@ def test_embed_content_span_origin(sentry_init, capture_events, mock_genai_clien contents=["Test origin"], ) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - for span in event["spans"]: - assert span["origin"] 
== "auto.ai.google_genai" + + spans = [item.payload for item in items if item.type == "span"] + for span in spans: + assert span["attributes"]["sentry.origin"] == "auto.ai.google_genai" @pytest.mark.asyncio @@ -1195,7 +1188,7 @@ def test_embed_content_span_origin(sentry_init, capture_events, mock_genai_clien ], ) async def test_async_embed_content( - sentry_init, capture_events, send_default_pii, include_prompts, mock_genai_client + sentry_init, capture_items, send_default_pii, include_prompts, mock_genai_client ): """Test async embed_content method.""" sentry_init( @@ -1203,7 +1196,7 @@ async def test_async_embed_content( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the async HTTP response mock_http_response = create_mock_http_response(EXAMPLE_EMBED_RESPONSE_JSON) @@ -1222,42 +1215,44 @@ async def test_async_embed_content( ], ) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "google_genai_embeddings_async" # Should have 1 span for embeddings - assert len(event["spans"]) == 1 - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + assert len(spans) == 1 + (embed_span,) = spans # Check embeddings span - assert embed_span["op"] == OP.GEN_AI_EMBEDDINGS - assert embed_span["description"] == "embeddings text-embedding-004" - assert embed_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" - assert embed_span["data"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" - assert embed_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-004" + assert embed_span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert embed_span["name"] == "embeddings text-embedding-004" + assert embed_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert 
embed_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "gcp.gemini" + assert ( + embed_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-004" + ) # Check input texts if PII is allowed if send_default_pii and include_prompts: - input_texts = json.loads(embed_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) + input_texts = json.loads( + embed_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + ) assert input_texts == [ "What is your name?", "What is your favorite color?", ] else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embed_span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embed_span["attributes"] # Check usage data (sum of token counts from statistics: 10 + 15 = 25) # Note: Only available in newer versions with ContentEmbeddingStatistics - if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["data"]: - assert embed_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 25 + if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["attributes"]: + assert embed_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 25 @pytest.mark.asyncio async def test_async_embed_content_string_input( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test async embed_content with a single string instead of list.""" sentry_init( @@ -1265,7 +1260,7 @@ async def test_async_embed_content_string_input( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") # Mock response with single embedding single_embed_response = { @@ -1293,28 +1288,28 @@ async def test_async_embed_content_string_input( contents="Single text input", ) - (event,) = events - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (embed_span,) = spans # Check that single string is handled correctly - input_texts = json.loads(embed_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) + input_texts = 
json.loads(embed_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) assert input_texts == ["Single text input"] # Should use token_count from statistics (5), not billable_character_count (10) # Note: Only available in newer versions with ContentEmbeddingStatistics - if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["data"]: - assert embed_span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + if SPANDATA.GEN_AI_USAGE_INPUT_TOKENS in embed_span["attributes"]: + assert embed_span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 @pytest.mark.asyncio async def test_async_embed_content_error_handling( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test error handling in async embed_content.""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "event") # Mock an error at the HTTP level with mock.patch.object( @@ -1329,9 +1324,7 @@ async def test_async_embed_content_error_handling( contents=["This will fail"], ) - # Should have both transaction and error events - assert len(events) == 2 - error_event, _ = events + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "Exception" @@ -1341,14 +1334,14 @@ async def test_async_embed_content_error_handling( @pytest.mark.asyncio async def test_async_embed_content_without_statistics( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test async embed_content response without statistics (older package versions).""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") # Response without statistics (typical for older google-genai versions) # Embeddings exist but don't have the statistics field @@ -1373,24 +1366,24 @@ 
async def test_async_embed_content_without_statistics( contents=["Test without statistics", "Another test"], ) - (event,) = events - (embed_span,) = event["spans"] + spans = [item.payload for item in items if item.type == "span"] + (embed_span,) = spans # No usage tokens since there are no statistics in older versions # This is expected and the integration should handle it gracefully - assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in embed_span["data"] + assert SPANDATA.GEN_AI_USAGE_INPUT_TOKENS not in embed_span["attributes"] @pytest.mark.asyncio async def test_async_embed_content_span_origin( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test that async embed_content spans have correct origin.""" sentry_init( integrations=[GoogleGenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") mock_http_response = create_mock_http_response(EXAMPLE_EMBED_RESPONSE_JSON) @@ -1403,16 +1396,17 @@ async def test_async_embed_content_span_origin( contents=["Test origin"], ) - (event,) = events - + (event,) = [item.payload for item in items if item.type == "transaction"] assert event["contexts"]["trace"]["origin"] == "manual" - for span in event["spans"]: - assert span["origin"] == "auto.ai.google_genai" + + spans = [item.payload for item in items if item.type == "span"] + for span in spans: + assert span["attributes"]["sentry.origin"] == "auto.ai.google_genai" # Integration tests for generate_content with different input message formats def test_generate_content_with_content_object( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with Content object input.""" sentry_init( @@ -1420,7 +1414,7 @@ def test_generate_content_with_content_object( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = 
create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1437,10 +1431,9 @@ def test_generate_content_with_content_object( model="gemini-1.5-flash", contents=content, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert messages[0]["content"] == [ @@ -1449,7 +1442,7 @@ def test_generate_content_with_content_object( def test_generate_content_with_dict_format( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with dict format input (ContentDict).""" sentry_init( @@ -1457,7 +1450,7 @@ def test_generate_content_with_dict_format( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1472,10 +1465,9 @@ def test_generate_content_with_dict_format( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert messages[0]["content"] == [ @@ -1483,16 +1475,14 @@ def test_generate_content_with_dict_format( ] -def test_generate_content_with_file_data( - sentry_init, capture_events, mock_genai_client -): +def test_generate_content_with_file_data(sentry_init, capture_items, mock_genai_client): """Test generate_content with file_data (external file reference).""" 
sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1516,10 +1506,9 @@ def test_generate_content_with_file_data( model="gemini-1.5-flash", contents=content, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert len(messages[0]["content"]) == 2 @@ -1534,7 +1523,7 @@ def test_generate_content_with_file_data( def test_generate_content_with_inline_data( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with inline_data (binary data).""" sentry_init( @@ -1542,7 +1531,7 @@ def test_generate_content_with_inline_data( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1565,10 +1554,9 @@ def test_generate_content_with_inline_data( model="gemini-1.5-flash", contents=content, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert len(messages[0]["content"]) == 2 @@ -1580,7 +1568,7 @@ def test_generate_content_with_inline_data( def test_generate_content_with_function_response( - sentry_init, 
capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with function_response (tool result).""" sentry_init( @@ -1588,7 +1576,7 @@ def test_generate_content_with_function_response( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1622,10 +1610,9 @@ def test_generate_content_with_function_response( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 # First message is user message assert messages[0]["role"] == "tool" @@ -1635,7 +1622,7 @@ def test_generate_content_with_function_response( def test_generate_content_with_mixed_string_and_content( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with mixed string and Content objects in list.""" sentry_init( @@ -1643,7 +1630,7 @@ def test_generate_content_with_mixed_string_and_content( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1668,10 +1655,9 @@ def test_generate_content_with_mixed_string_and_content( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert 
len(messages) == 1 # User message assert messages[0]["role"] == "user" @@ -1679,7 +1665,7 @@ def test_generate_content_with_mixed_string_and_content( def test_generate_content_with_part_object_directly( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with Part object directly (not wrapped in Content).""" sentry_init( @@ -1687,7 +1673,7 @@ def test_generate_content_with_part_object_directly( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1702,17 +1688,16 @@ def test_generate_content_with_part_object_directly( model="gemini-1.5-flash", contents=part, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert messages[0]["content"] == [{"text": "Direct Part object", "type": "text"}] def test_generate_content_with_list_of_dicts( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """ Test generate_content with list of dict format inputs. 
@@ -1726,7 +1711,7 @@ def test_generate_content_with_list_of_dicts( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1745,17 +1730,16 @@ def test_generate_content_with_list_of_dicts( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert messages[0]["content"] == [{"text": "Second user message", "type": "text"}] def test_generate_content_with_dict_inline_data( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): """Test generate_content with dict format containing inline_data.""" sentry_init( @@ -1763,7 +1747,7 @@ def test_generate_content_with_dict_inline_data( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1784,10 +1768,9 @@ def test_generate_content_with_dict_inline_data( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" assert len(messages[0]["content"]) == 2 @@ -1801,14 +1784,14 @@ def test_generate_content_with_dict_inline_data( def test_generate_content_without_parts_property_inline_data( 
- sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1825,10 +1808,9 @@ def test_generate_content_without_parts_property_inline_data( model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 @@ -1845,14 +1827,14 @@ def test_generate_content_without_parts_property_inline_data( def test_generate_content_without_parts_property_inline_data_and_binary_data_within_string( - sentry_init, capture_events, mock_genai_client + sentry_init, capture_items, mock_genai_client ): sentry_init( integrations=[GoogleGenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") mock_http_response = create_mock_http_response(EXAMPLE_API_RESPONSE_JSON) @@ -1874,10 +1856,9 @@ def test_generate_content_without_parts_property_inline_data_and_binary_data_wit model="gemini-1.5-flash", contents=contents, config=create_test_config() ) - (event,) = events - invoke_span = event["spans"][0] + invoke_span = next(item.payload for item in items if item.type == "span") - messages = json.loads(invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages = json.loads(invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(messages) == 1 assert messages[0]["role"] == "user" diff --git a/tests/integrations/huggingface_hub/test_huggingface_hub.py 
b/tests/integrations/huggingface_hub/test_huggingface_hub.py index 9dd15ca4b5..031627906a 100644 --- a/tests/integrations/huggingface_hub/test_huggingface_hub.py +++ b/tests/integrations/huggingface_hub/test_huggingface_hub.py @@ -471,7 +471,7 @@ def mock_hf_chat_completion_api_streaming_tools(httpx_mock): @pytest.mark.parametrize("include_prompts", [True, False]) def test_text_generation( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_text_generation_api: "Any", @@ -481,7 +481,7 @@ def test_text_generation( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = InferenceClient(model="test-model") @@ -492,23 +492,22 @@ def test_text_generation( details=True, ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.text_completion" - assert span["description"] == "text_completion test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.text_completion" + assert span["name"] == "text_completion test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "text_completion", @@ -516,6 +515,14 @@ def test_text_generation( "gen_ai.response.finish_reasons": "length", "gen_ai.response.streaming": False, 
"gen_ai.usage.total_tokens": 10, + "sentry.environment": "production", + "sentry.op": "gen_ai.text_completion", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -528,10 +535,10 @@ def test_text_generation( assert "gen_ai.request.messages" not in expected_data assert "gen_ai.response.text" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data # text generation does not set the response model - assert "gen_ai.response.model" not in span["data"] + assert "gen_ai.response.model" not in span["attributes"] @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -539,7 +546,7 @@ def test_text_generation( @pytest.mark.parametrize("include_prompts", [True, False]) def test_text_generation_streaming( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_text_generation_api_streaming: "Any", @@ -549,7 +556,7 @@ def test_text_generation_streaming( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = InferenceClient(model="test-model") @@ -561,23 +568,22 @@ def test_text_generation_streaming( ): pass - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert 
sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.text_completion" - assert span["description"] == "text_completion test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.text_completion" + assert span["name"] == "text_completion test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "text_completion", @@ -585,6 +591,14 @@ def test_text_generation_streaming( "gen_ai.response.finish_reasons": "length", "gen_ai.response.streaming": True, "gen_ai.usage.total_tokens": 10, + "sentry.environment": "production", + "sentry.op": "gen_ai.text_completion", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -597,10 +611,10 @@ def test_text_generation_streaming( assert "gen_ai.request.messages" not in expected_data assert "gen_ai.response.text" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data # text generation does not set the response model - assert "gen_ai.response.model" not in span["data"] + assert "gen_ai.response.model" not in span["attributes"] @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -608,7 +622,7 @@ def test_text_generation_streaming( @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_chat_completion_api: "Any", @@ -618,7 +632,7 @@ def test_chat_completion( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", 
"span") client = get_hf_provider_inference_client() @@ -628,23 +642,22 @@ def test_chat_completion( stream=False, ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "chat", @@ -655,6 +668,14 @@ def test_chat_completion( "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 8, "gen_ai.usage.total_tokens": 18, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -671,7 +692,7 @@ def test_chat_completion( assert "gen_ai.request.messages" not in expected_data assert "gen_ai.response.text" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -679,7 +700,7 @@ def test_chat_completion( @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion_streaming( sentry_init: "Any", - capture_events: 
"Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_chat_completion_api_streaming: "Any", @@ -689,7 +710,7 @@ def test_chat_completion_streaming( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = get_hf_provider_inference_client() @@ -701,23 +722,22 @@ def test_chat_completion_streaming( ) ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "chat", @@ -725,6 +745,14 @@ def test_chat_completion_streaming( "gen_ai.response.finish_reasons": "stop", "gen_ai.response.model": "test-model-123", "gen_ai.response.streaming": True, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -744,15 +772,15 @@ def test_chat_completion_streaming( assert 
"gen_ai.request.messages" not in expected_data assert "gen_ai.response.text" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) def test_chat_completion_api_error( - sentry_init: "Any", capture_events: "Any", mock_hf_api_with_errors: "Any" + sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: - sentry_init(traces_sample_rate=1.0) - events = capture_events() + sentry_init(traces_sample_rate=1.0, _experiments={"gen_ai_as_v2_spans": True}) + items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -762,32 +790,29 @@ def test_chat_completion_api_error( messages=[{"role": "user", "content": "Hello!"}], ) - ( - error, - transaction, - ) = events - + (error,) = (item.payload for item in items if item.type == "event") assert error["exception"]["values"][0]["mechanism"]["type"] == "huggingface_hub" assert not error["exception"]["values"][0]["mechanism"]["handled"] + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" - assert span["status"] == "internal_error" - assert span.get("tags", {}).get("status") == "internal_error" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == 
"auto.ai.huggingface_hub" + assert span["status"] == "error" + (transaction,) = (item.payload for item in items if item.type == "transaction") assert ( error["contexts"]["trace"]["trace_id"] == transaction["contexts"]["trace"]["trace_id"] @@ -795,18 +820,26 @@ def test_chat_completion_api_error( expected_data = { "gen_ai.operation.name": "chat", "gen_ai.request.model": "test-model", + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert span["data"] == expected_data + assert span["attributes"] == expected_data @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) def test_span_status_error( - sentry_init: "Any", capture_events: "Any", mock_hf_api_with_errors: "Any" + sentry_init: "Any", capture_items: "Any", mock_hf_api_with_errors: "Any" ) -> None: - sentry_init(traces_sample_rate=1.0) - events = capture_events() + sentry_init(traces_sample_rate=1.0, _experiments={"gen_ai_as_v2_spans": True}) + items = capture_items("event", "transaction", "span") client = get_hf_provider_inference_client() @@ -816,22 +849,22 @@ def test_span_status_error( messages=[{"role": "user", "content": "Hello!"}], ) - (error, transaction) = events + (error,) = [item.payload for item in items if item.type == "event"] assert error["level"] == "error" + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + 
assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["status"] == "internal_error" - assert span["tags"]["status"] == "internal_error" + assert span["status"] == "error" @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -839,7 +872,7 @@ def test_span_status_error( @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion_with_tools( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_chat_completion_api_tools: "Any", @@ -849,7 +882,7 @@ def test_chat_completion_with_tools( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = get_hf_provider_inference_client() @@ -875,23 +908,22 @@ def test_chat_completion_with_tools( tool_choice="auto", ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "chat", @@ -902,6 +934,14 @@ def test_chat_completion_with_tools( "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 8, 
"gen_ai.usage.total_tokens": 18, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -919,7 +959,7 @@ def test_chat_completion_with_tools( assert "gen_ai.response.text" not in expected_data assert "gen_ai.response.tool_calls" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data @pytest.mark.httpx_mock(assert_all_requests_were_expected=False) @@ -927,7 +967,7 @@ def test_chat_completion_with_tools( @pytest.mark.parametrize("include_prompts", [True, False]) def test_chat_completion_streaming_with_tools( sentry_init: "Any", - capture_events: "Any", + capture_items: "Any", send_default_pii: "Any", include_prompts: "Any", mock_hf_chat_completion_api_streaming_tools: "Any", @@ -937,7 +977,7 @@ def test_chat_completion_streaming_with_tools( send_default_pii=send_default_pii, integrations=[HuggingfaceHubIntegration(include_prompts=include_prompts)], ) - events = capture_events() + items = capture_items("transaction", "span") client = get_hf_provider_inference_client() @@ -966,23 +1006,22 @@ def test_chat_completion_streaming_with_tools( ) ) - (transaction,) = events - + spans = [item.payload for item in items if item.type == "span"] span = None - for sp in transaction["spans"]: - if sp["op"].startswith("gen_ai"): + for sp in spans: + if sp["attributes"]["sentry.op"].startswith("gen_ai"): assert span is None, "there is exactly one gen_ai span" span = sp else: # there should be no other spans, just the gen_ai span # and optionally some http.client spans from talking to the hf api - assert sp["op"] == "http.client" + assert sp["attributes"]["sentry.op"] == "http.client" assert span is not None - assert span["op"] == "gen_ai.chat" - assert 
span["description"] == "chat test-model" - assert span["origin"] == "auto.ai.huggingface_hub" + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["name"] == "chat test-model" + assert span["attributes"]["sentry.origin"] == "auto.ai.huggingface_hub" expected_data = { "gen_ai.operation.name": "chat", @@ -991,6 +1030,14 @@ def test_chat_completion_streaming_with_tools( "gen_ai.response.finish_reasons": "tool_calls", "gen_ai.response.model": "test-model-123", "gen_ai.response.streaming": True, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "auto.ai.huggingface_hub", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -1014,4 +1061,4 @@ def test_chat_completion_streaming_with_tools( assert "gen_ai.response.text" not in expected_data assert "gen_ai.response.tool_calls" not in expected_data - assert span["data"] == expected_data + assert span["attributes"] == expected_data diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index 498a5d6f4a..3c1d9bef54 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -97,7 +97,7 @@ def _llm_type(self) -> str: def test_langchain_text_completion( sentry_init, - capture_events, + capture_items, get_model_response, ): sentry_init( @@ -109,7 +109,7 @@ def test_langchain_text_completion( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") model_response = get_model_response( Completion( @@ -149,25 +149,29 @@ def test_langchain_text_completion( input_text = "What is the capital of France?" 
model.invoke(input_text, config={"run_name": "my-snazzy-pipeline"}) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] llm_spans = [ span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.text_completion" + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.text_completion" ] assert len(llm_spans) > 0 llm_span = llm_spans[0] - assert llm_span["description"] == "text_completion gpt-3.5-turbo" - assert llm_span["data"]["gen_ai.system"] == "openai" - assert llm_span["data"]["gen_ai.pipeline.name"] == "my-snazzy-pipeline" - assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo" - assert llm_span["data"]["gen_ai.response.text"] == "The capital of France is Paris." - assert llm_span["data"]["gen_ai.usage.total_tokens"] == 25 - assert llm_span["data"]["gen_ai.usage.input_tokens"] == 10 - assert llm_span["data"]["gen_ai.usage.output_tokens"] == 15 + assert llm_span["name"] == "text_completion gpt-3.5-turbo" + assert llm_span["attributes"]["gen_ai.system"] == "openai" + assert llm_span["attributes"]["gen_ai.pipeline.name"] == "my-snazzy-pipeline" + assert llm_span["attributes"]["gen_ai.request.model"] == "gpt-3.5-turbo" + assert ( + llm_span["attributes"]["gen_ai.response.text"] + == "The capital of France is Paris." 
+ ) + assert llm_span["attributes"]["gen_ai.usage.total_tokens"] == 25 + assert llm_span["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert llm_span["attributes"]["gen_ai.usage.output_tokens"] == 15 @pytest.mark.skipif( @@ -196,7 +200,7 @@ def test_langchain_text_completion( ) def test_langchain_create_agent( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, system_instructions_content, @@ -213,7 +217,7 @@ def test_langchain_create_agent( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") model_response = get_model_response( nonstreaming_responses_model_response, @@ -250,22 +254,23 @@ def test_langchain_create_agent( }, ) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" assert tx["contexts"]["trace"]["origin"] == "manual" - chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat") + spans = [item.payload for item in items if item.type == "span"] + chat_spans = list(x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.chat") assert len(chat_spans) == 1 - assert chat_spans[0]["origin"] == "auto.ai.langchain" + assert chat_spans[0]["attributes"]["sentry.origin"] == "auto.ai.langchain" - assert chat_spans[0]["data"]["gen_ai.system"] == "openai-chat" - assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 10 - assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 20 - assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 30 + assert chat_spans[0]["attributes"]["gen_ai.system"] == "openai-chat" + assert chat_spans[0]["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert chat_spans[0]["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert chat_spans[0]["attributes"]["gen_ai.usage.total_tokens"] == 30 if send_default_pii and include_prompts: assert ( - chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + 
chat_spans[0]["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hello, how can I help you?" ) @@ -276,7 +281,9 @@ def test_langchain_create_agent( "type": "text", "content": "You are very powerful assistant, but don't know current events", } - ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + ] == json.loads( + chat_spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) else: assert [ { @@ -287,11 +294,17 @@ def test_langchain_create_agent( "type": "text", "content": "Be concise and clear.", }, - ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + ] == json.loads( + chat_spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("attributes", {}) @pytest.mark.skipif( @@ -309,7 +322,7 @@ def test_langchain_create_agent( ) def test_tool_execution_span( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -324,7 +337,7 @@ def test_tool_execution_span( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") responses = responses_tool_call_model_responses( tool_name="get_word_length", @@ -400,60 +413,71 @@ def test_tool_execution_span( }, ) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" assert tx["contexts"]["trace"]["origin"] == "manual" - chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat") + 
spans = [item.payload for item in items if item.type == "span"] + chat_spans = list(x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.chat") assert len(chat_spans) == 2 - tool_exec_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.execute_tool") + tool_exec_spans = list( + x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.execute_tool" + ) assert len(tool_exec_spans) == 1 tool_exec_span = tool_exec_spans[0] - assert chat_spans[0]["origin"] == "auto.ai.langchain" - assert chat_spans[1]["origin"] == "auto.ai.langchain" - assert tool_exec_span["origin"] == "auto.ai.langchain" + assert chat_spans[0]["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert chat_spans[1]["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert tool_exec_span["attributes"]["sentry.origin"] == "auto.ai.langchain" - assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 142 - assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 50 - assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 192 - assert chat_spans[0]["data"]["gen_ai.system"] == "openai-chat" + assert chat_spans[0]["attributes"]["gen_ai.usage.input_tokens"] == 142 + assert chat_spans[0]["attributes"]["gen_ai.usage.output_tokens"] == 50 + assert chat_spans[0]["attributes"]["gen_ai.usage.total_tokens"] == 192 + assert chat_spans[0]["attributes"]["gen_ai.system"] == "openai-chat" - assert chat_spans[1]["data"]["gen_ai.usage.input_tokens"] == 89 - assert chat_spans[1]["data"]["gen_ai.usage.output_tokens"] == 28 - assert chat_spans[1]["data"]["gen_ai.usage.total_tokens"] == 117 - assert chat_spans[1]["data"]["gen_ai.system"] == "openai-chat" + assert chat_spans[1]["attributes"]["gen_ai.usage.input_tokens"] == 89 + assert chat_spans[1]["attributes"]["gen_ai.usage.output_tokens"] == 28 + assert chat_spans[1]["attributes"]["gen_ai.usage.total_tokens"] == 117 + assert chat_spans[1]["attributes"]["gen_ai.system"] == "openai-chat" if send_default_pii and include_prompts: - assert 
"word" in tool_exec_span["data"][SPANDATA.GEN_AI_TOOL_INPUT] + assert "word" in tool_exec_span["attributes"][SPANDATA.GEN_AI_TOOL_INPUT] - assert "5" in chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "5" in chat_spans[1]["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] # Verify tool calls are recorded when PII is enabled - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_spans[0].get("data", {}), ( + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_spans[0].get( + "attributes", {} + ), ( "Tool calls should be recorded when send_default_pii=True and include_prompts=True" ) - tool_calls_data = chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + tool_calls_data = chat_spans[0]["attributes"][ + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS + ] assert isinstance(tool_calls_data, str) assert "get_word_length" in tool_calls_data else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_TOOL_INPUT not in tool_exec_span.get("data", {}) - assert SPANDATA.GEN_AI_TOOL_OUTPUT not in tool_exec_span.get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("attributes", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("attributes", {}) + assert SPANDATA.GEN_AI_TOOL_INPUT not in tool_exec_span.get("attributes", {}) + assert SPANDATA.GEN_AI_TOOL_OUTPUT not in tool_exec_span.get("attributes", {}) # Verify tool calls are NOT recorded when PII is disabled assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in chat_spans[0].get( - "data", {} + "attributes", {} ), ( f"Tool calls 
should NOT be recorded when send_default_pii={send_default_pii} " f"and include_prompts={include_prompts}" ) assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in chat_spans[1].get( - "data", {} + "attributes", {} ), ( f"Tool calls should NOT be recorded when send_default_pii={send_default_pii} " f"and include_prompts={include_prompts}" @@ -461,7 +485,7 @@ def test_tool_execution_span( # Verify that available tools are always recorded regardless of PII settings for chat_span in chat_spans: - tools_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + tools_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] assert "get_word_length" in tools_data @@ -488,7 +512,7 @@ def test_tool_execution_span( ) def test_langchain_openai_tools_agent( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, system_instructions_content, @@ -505,7 +529,7 @@ def test_langchain_openai_tools_agent( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") prompt = ChatPromptTemplate.from_messages( [ @@ -700,40 +724,47 @@ def test_langchain_openai_tools_agent( with start_transaction(): list(agent_executor.stream({"input": "How many letters in the word eudca"})) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" assert tx["contexts"]["trace"]["origin"] == "manual" - invoke_agent_span = next(x for x in tx["spans"] if x["op"] == "gen_ai.invoke_agent") - chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat") - tool_exec_span = next(x for x in tx["spans"] if x["op"] == "gen_ai.execute_tool") + spans = [item.payload for item in items if item.type == "span"] + invoke_agent_span = next( + x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.invoke_agent" + ) + chat_spans = list(x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.chat") + tool_exec_span = next( 
+ x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.execute_tool" + ) assert len(chat_spans) == 2 - assert invoke_agent_span["origin"] == "auto.ai.langchain" - assert chat_spans[0]["origin"] == "auto.ai.langchain" - assert chat_spans[1]["origin"] == "auto.ai.langchain" - assert tool_exec_span["origin"] == "auto.ai.langchain" + assert invoke_agent_span["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert chat_spans[0]["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert chat_spans[1]["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert tool_exec_span["attributes"]["sentry.origin"] == "auto.ai.langchain" # We can't guarantee anything about the "shape" of the langchain execution graph - assert len(list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")) > 0 + assert ( + len(list(x for x in spans if x["attributes"]["sentry.op"] == "gen_ai.chat")) > 0 + ) # Token usage is only available in newer versions of langchain (v0.2+) # where usage_metadata is supported on AIMessageChunk - if "gen_ai.usage.input_tokens" in chat_spans[0]["data"]: - assert chat_spans[0]["data"]["gen_ai.usage.input_tokens"] == 142 - assert chat_spans[0]["data"]["gen_ai.usage.output_tokens"] == 50 - assert chat_spans[0]["data"]["gen_ai.usage.total_tokens"] == 192 + if "gen_ai.usage.input_tokens" in chat_spans[0]["attributes"]: + assert chat_spans[0]["attributes"]["gen_ai.usage.input_tokens"] == 142 + assert chat_spans[0]["attributes"]["gen_ai.usage.output_tokens"] == 50 + assert chat_spans[0]["attributes"]["gen_ai.usage.total_tokens"] == 192 - if "gen_ai.usage.input_tokens" in chat_spans[1]["data"]: - assert chat_spans[1]["data"]["gen_ai.usage.input_tokens"] == 89 - assert chat_spans[1]["data"]["gen_ai.usage.output_tokens"] == 28 - assert chat_spans[1]["data"]["gen_ai.usage.total_tokens"] == 117 + if "gen_ai.usage.input_tokens" in chat_spans[1]["attributes"]: + assert chat_spans[1]["attributes"]["gen_ai.usage.input_tokens"] == 89 + assert 
chat_spans[1]["attributes"]["gen_ai.usage.output_tokens"] == 28 + assert chat_spans[1]["attributes"]["gen_ai.usage.total_tokens"] == 117 if send_default_pii and include_prompts: - assert "5" in chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - assert "word" in tool_exec_span["data"][SPANDATA.GEN_AI_TOOL_INPUT] - assert 5 == int(tool_exec_span["data"][SPANDATA.GEN_AI_TOOL_OUTPUT]) + assert "5" in chat_spans[0]["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "word" in tool_exec_span["attributes"][SPANDATA.GEN_AI_TOOL_INPUT] + assert 5 == int(tool_exec_span["attributes"][SPANDATA.GEN_AI_TOOL_OUTPUT]) param_id = request.node.callspec.id if "string" in param_id: @@ -742,7 +773,9 @@ def test_langchain_openai_tools_agent( "type": "text", "content": "You are very powerful assistant, but don't know current events", } - ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + ] == json.loads( + chat_spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) else: assert [ { @@ -753,15 +786,21 @@ def test_langchain_openai_tools_agent( "type": "text", "content": "Be concise and clear.", }, - ] == json.loads(chat_spans[0]["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) + ] == json.loads( + chat_spans[0]["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + ) - assert "5" in chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "5" in chat_spans[1]["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] # Verify tool calls are recorded when PII is enabled - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_spans[0].get("data", {}), ( + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in chat_spans[0].get( + "attributes", {} + ), ( "Tool calls should be recorded when send_default_pii=True and include_prompts=True" ) - tool_calls_data = chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + tool_calls_data = chat_spans[0]["attributes"][ + SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS + ] assert isinstance(tool_calls_data, (list, str)) # Could be serialized if 
isinstance(tool_calls_data, str): assert "get_word_length" in tool_calls_data @@ -770,45 +809,55 @@ def test_langchain_openai_tools_agent( tool_call_str = str(tool_calls_data) assert "get_word_length" in tool_call_str else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_TOOL_INPUT not in tool_exec_span.get("data", {}) - assert SPANDATA.GEN_AI_TOOL_OUTPUT not in tool_exec_span.get("data", {}) + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("attributes", {}) + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_spans[1].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get( + "attributes", {} + ) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("attributes", {}) + assert SPANDATA.GEN_AI_TOOL_INPUT not in tool_exec_span.get("attributes", {}) + assert SPANDATA.GEN_AI_TOOL_OUTPUT not in tool_exec_span.get("attributes", {}) # Verify tool calls are NOT recorded when PII is disabled assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in chat_spans[0].get( - "data", {} + "attributes", {} ), ( f"Tool calls should NOT be recorded when send_default_pii={send_default_pii} " f"and include_prompts={include_prompts}" ) assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in chat_spans[1].get( - "data", {} + "attributes", {} ), ( f"Tool calls should NOT be recorded when 
send_default_pii={send_default_pii} " f"and include_prompts={include_prompts}" ) # Verify finish_reasons is always an array of strings - assert chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == [ + assert chat_spans[0]["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == [ "function_call" ] - assert chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == ["stop"] + assert chat_spans[1]["attributes"][SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS] == [ + "stop" + ] # Verify that available tools are always recorded regardless of PII settings for chat_span in chat_spans: - tools_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + tools_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] assert tools_data is not None, ( "Available tools should always be recorded regardless of PII settings" ) assert "get_word_length" in tools_data -def test_langchain_error(sentry_init, capture_events): +def test_langchain_error(sentry_init, capture_items): global llm_type llm_type = "acme-llm" @@ -817,7 +866,7 @@ def test_langchain_error(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("event", "transaction", "span") prompt = ChatPromptTemplate.from_messages( [ @@ -843,11 +892,11 @@ def test_langchain_error(sentry_init, capture_events): with start_transaction(), pytest.raises(ValueError): list(agent_executor.stream({"input": "How many letters in the word eudca"})) - error = events[0] + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" -def test_span_status_error(sentry_init, capture_events): +def test_span_status_error(sentry_init, capture_items): global llm_type llm_type = "acme-llm" @@ -855,7 +904,7 @@ def test_span_status_error(sentry_init, capture_events): integrations=[LangchainIntegration(include_prompts=True)], traces_sample_rate=1.0, ) - events = capture_events() + items = 
capture_items("event", "transaction", "span") with start_transaction(name="test"): prompt = ChatPromptTemplate.from_messages( @@ -884,10 +933,13 @@ def test_span_status_error(sentry_init, capture_events): with pytest.raises(ValueError): list(agent_executor.stream({"input": "How many letters in the word eudca"})) - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" @@ -935,7 +987,9 @@ def _llm_type(self): def _identifying_params(self): return {} - sentry_init(integrations=[LangchainIntegration()]) + sentry_init( + integrations=[LangchainIntegration()], _experiments={"gen_ai_as_v2_spans": True} + ) # Create a manual SentryLangchainCallback manual_callback = SentryLangchainCallback( @@ -1100,7 +1154,7 @@ def test_langchain_callback_list_existing_callback(sentry_init): assert handler is sentry_callback -def test_langchain_message_role_mapping(sentry_init, capture_events): +def test_langchain_message_role_mapping(sentry_init, capture_items): """Test that message roles are properly normalized in langchain integration.""" global llm_type llm_type = "openai-chat" @@ -1110,7 +1164,7 @@ def test_langchain_message_role_mapping(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") prompt = ChatPromptTemplate.from_messages( [ @@ -1146,19 +1200,18 @@ def test_langchain_message_role_mapping(sentry_init, capture_events): with start_transaction(): list(agent_executor.stream({"input": test_input})) - assert 
len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find spans with gen_ai operation that should have message data gen_ai_spans = [ - span for span in tx.get("spans", []) if span.get("op", "").startswith("gen_ai") + span + for span in spans + if span["attributes"].get("sentry.op", "").startswith("gen_ai") ] # Check if any span has message data with normalized roles message_data_found = False for span in gen_ai_spans: - span_data = span.get("data", {}) + span_data = span.get("attributes", {}) if SPANDATA.GEN_AI_REQUEST_MESSAGES in span_data: message_data_found = True messages_data = span_data[SPANDATA.GEN_AI_REQUEST_MESSAGES] @@ -1239,7 +1292,7 @@ def test_langchain_message_role_normalization_units(): assert normalized[5] == "string message" # String message unchanged -def test_langchain_message_truncation(sentry_init, capture_events): +def test_langchain_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in Langchain integration.""" from langchain_core.outputs import LLMResult, Generation @@ -1248,7 +1301,7 @@ def test_langchain_message_truncation(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True) @@ -1291,23 +1344,23 @@ def test_langchain_message_truncation(sentry_init, capture_events): ) callback.on_llm_end(response=response, run_id=run_id) - assert len(events) > 0 - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] llm_spans = [ span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.text_completion" + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.text_completion" ] assert 
len(llm_spans) > 0 llm_span = llm_spans[0] - assert llm_span["data"]["gen_ai.operation.name"] == "text_completion" - assert llm_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "my_pipeline" + assert llm_span["attributes"]["gen_ai.operation.name"] == "text_completion" + assert llm_span["attributes"][SPANDATA.GEN_AI_PIPELINE_NAME] == "my_pipeline" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"] - messages_data = llm_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["attributes"] + messages_data = llm_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) @@ -1327,7 +1380,7 @@ def test_langchain_message_truncation(sentry_init, capture_events): ], ) def test_langchain_embeddings_sync( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that sync embedding methods (embed_documents, embed_query) are properly traced.""" try: @@ -1340,7 +1393,7 @@ def test_langchain_embeddings_sync( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API call with mock.patch.object( @@ -1362,27 +1415,28 @@ def test_langchain_embeddings_sync( assert len(result) == 2 mock_embed_documents.assert_called_once() - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 embeddings_span = embeddings_spans[0] - assert embeddings_span["description"] == "embeddings text-embedding-ada-002" - assert 
embeddings_span["origin"] == "auto.ai.langchain" - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" - assert embeddings_span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert embeddings_span["name"] == "embeddings text-embedding-ada-002" + assert embeddings_span["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert ( + embeddings_span["attributes"]["gen_ai.request.model"] + == "text-embedding-ada-002" + ) # Check if input is captured based on PII settings if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["data"] - input_data = embeddings_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["attributes"] + input_data = embeddings_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Could be serialized as string if isinstance(input_data, str): assert "Hello world" in input_data @@ -1391,7 +1445,9 @@ def test_langchain_embeddings_sync( assert "Hello world" in input_data assert "Test document" in input_data else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get("data", {}) + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get( + "attributes", {} + ) @pytest.mark.parametrize( @@ -1402,7 +1458,7 @@ def test_langchain_embeddings_sync( ], ) def test_langchain_embeddings_embed_query( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that embed_query method is properly traced.""" try: @@ -1415,7 +1471,7 @@ def test_langchain_embeddings_embed_query( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API call with mock.patch.object( @@ -1436,32 +1492,35 @@ def test_langchain_embeddings_embed_query( assert len(result) 
== 3 mock_embed_query.assert_called_once() - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 embeddings_span = embeddings_spans[0] - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" - assert embeddings_span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert ( + embeddings_span["attributes"]["gen_ai.request.model"] + == "text-embedding-ada-002" + ) # Check if input is captured based on PII settings if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["data"] - input_data = embeddings_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["attributes"] + input_data = embeddings_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Could be serialized as string if isinstance(input_data, str): assert "What is the capital of France?" in input_data else: assert "What is the capital of France?" 
in input_data else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get("data", {}) + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get( + "attributes", {} + ) @pytest.mark.parametrize( @@ -1473,7 +1532,7 @@ def test_langchain_embeddings_embed_query( ) @pytest.mark.asyncio async def test_langchain_embeddings_async( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """Test that async embedding methods (aembed_documents, aembed_query) are properly traced.""" try: @@ -1486,7 +1545,7 @@ async def test_langchain_embeddings_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") async def mock_aembed_documents(self, texts): return [[0.1, 0.2, 0.3] for _ in texts] @@ -1512,38 +1571,41 @@ async def mock_aembed_documents(self, texts): assert len(result) == 2 mock_aembed.assert_called_once() - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 embeddings_span = embeddings_spans[0] - assert embeddings_span["description"] == "embeddings text-embedding-ada-002" - assert embeddings_span["origin"] == "auto.ai.langchain" - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" - assert embeddings_span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert embeddings_span["name"] == "embeddings text-embedding-ada-002" + assert embeddings_span["attributes"]["sentry.origin"] == "auto.ai.langchain" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert ( + 
embeddings_span["attributes"]["gen_ai.request.model"] + == "text-embedding-ada-002" + ) # Check if input is captured based on PII settings if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["data"] - input_data = embeddings_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["attributes"] + input_data = embeddings_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Could be serialized as string if isinstance(input_data, str): assert "Async hello" in input_data or "Async test document" in input_data else: assert "Async hello" in input_data or "Async test document" in input_data else: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get("data", {}) + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in embeddings_span.get( + "attributes", {} + ) @pytest.mark.asyncio -async def test_langchain_embeddings_aembed_query(sentry_init, capture_events): +async def test_langchain_embeddings_aembed_query(sentry_init, capture_items): """Test that aembed_query method is properly traced.""" try: from langchain_openai import OpenAIEmbeddings @@ -1555,7 +1617,7 @@ async def test_langchain_embeddings_aembed_query(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") async def mock_aembed_query(self, text): return [0.1, 0.2, 0.3] @@ -1579,24 +1641,25 @@ async def mock_aembed_query(self, text): assert len(result) == 3 mock_aembed.assert_called_once() - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 
embeddings_span = embeddings_spans[0] - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" - assert embeddings_span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert ( + embeddings_span["attributes"]["gen_ai.request.model"] + == "text-embedding-ada-002" + ) # Check if input is captured - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["data"] - input_data = embeddings_span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in embeddings_span["attributes"] + input_data = embeddings_span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Could be serialized as string if isinstance(input_data, str): assert "Async query test" in input_data @@ -1604,7 +1667,7 @@ async def mock_aembed_query(self, text): assert "Async query test" in input_data -def test_langchain_embeddings_no_model_name(sentry_init, capture_events): +def test_langchain_embeddings_no_model_name(sentry_init, capture_items): """Test embeddings when model name is not available.""" try: from langchain_openai import OpenAIEmbeddings @@ -1615,7 +1678,7 @@ def test_langchain_embeddings_no_model_name(sentry_init, capture_events): integrations=[LangchainIntegration(include_prompts=False)], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API call and remove model attribute with mock.patch.object( @@ -1635,28 +1698,26 @@ def test_langchain_embeddings_no_model_name(sentry_init, capture_events): with start_transaction(name="test_embeddings_no_model"): embeddings.embed_documents(["Test"]) - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings span embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + 
for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 1 embeddings_span = embeddings_spans[0] - assert embeddings_span["description"] == "embeddings" - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" + assert embeddings_span["name"] == "embeddings" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" # Model name should not be set if not available assert ( - "gen_ai.request.model" not in embeddings_span["data"] - or embeddings_span["data"]["gen_ai.request.model"] is None + "gen_ai.request.model" not in embeddings_span["attributes"] + or embeddings_span["attributes"]["gen_ai.request.model"] is None ) -def test_langchain_embeddings_integration_disabled(sentry_init, capture_events): +def test_langchain_embeddings_integration_disabled(sentry_init, capture_items): """Test that embeddings are not traced when integration is disabled.""" try: from langchain_openai import OpenAIEmbeddings @@ -1664,8 +1725,8 @@ def test_langchain_embeddings_integration_disabled(sentry_init, capture_events): pytest.skip("langchain_openai not installed") # Initialize without LangchainIntegration - sentry_init(traces_sample_rate=1.0) - events = capture_events() + sentry_init(traces_sample_rate=1.0, _experiments={"gen_ai_as_v2_spans": True}) + items = capture_items("transaction", "span") with mock.patch.object( OpenAIEmbeddings, @@ -1680,18 +1741,17 @@ def test_langchain_embeddings_integration_disabled(sentry_init, capture_events): embeddings.embed_documents(["Test"]) # Check that no embeddings spans were created - if events: - tx = events[0] - embeddings_spans = [ - span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.embeddings" - ] - # Should be empty since integration is disabled - assert len(embeddings_spans) == 0 + spans = [item.payload for item in items if item.type == "span"] + embeddings_spans = [ + span + for span in spans + if 
span["attributes"].get("sentry.op") == "gen_ai.embeddings" + ] + # Should be empty since integration is disabled + assert len(embeddings_spans) == 0 -def test_langchain_embeddings_multiple_providers(sentry_init, capture_events): +def test_langchain_embeddings_multiple_providers(sentry_init, capture_items): """Test that embeddings work with different providers.""" try: from langchain_openai import OpenAIEmbeddings, AzureOpenAIEmbeddings @@ -1703,7 +1763,7 @@ def test_langchain_embeddings_multiple_providers(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock both providers with mock.patch.object( @@ -1731,26 +1791,24 @@ def test_langchain_embeddings_multiple_providers(sentry_init, capture_events): openai_embeddings.embed_documents(["OpenAI test"]) azure_embeddings.embed_documents(["Azure test"]) - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings spans embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] # Should have 2 spans, one for each provider assert len(embeddings_spans) == 2 # Verify both spans have proper data for span in embeddings_spans: - assert span["data"]["gen_ai.operation.name"] == "embeddings" - assert span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["data"] + assert span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert span["attributes"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["attributes"] -def test_langchain_embeddings_error_handling(sentry_init, capture_events): +def test_langchain_embeddings_error_handling(sentry_init, 
capture_items): """Test that errors in embeddings are properly captured.""" try: from langchain_openai import OpenAIEmbeddings @@ -1762,7 +1820,7 @@ def test_langchain_embeddings_error_handling(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the API call to raise an error with mock.patch.object( @@ -1781,15 +1839,16 @@ def test_langchain_embeddings_error_handling(sentry_init, capture_events): with pytest.raises(ValueError): embeddings.embed_documents(["Test"]) - # The error should be captured - assert len(events) >= 1 - # We should have both the transaction and potentially an error event - [e for e in events if e.get("level") == "error"] + [ + item.payload + for item in items + if item.type == "event" and item.payload.get("level") == "error" + ] # Note: errors might not be auto-captured depending on SDK settings, # but the span should still be created -def test_langchain_embeddings_multiple_calls(sentry_init, capture_events): +def test_langchain_embeddings_multiple_calls(sentry_init, capture_items): """Test that multiple embeddings calls within a transaction are all traced.""" try: from langchain_openai import OpenAIEmbeddings @@ -1801,7 +1860,7 @@ def test_langchain_embeddings_multiple_calls(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API calls with mock.patch.object( @@ -1828,32 +1887,31 @@ def test_langchain_embeddings_multiple_calls(sentry_init, capture_events): # Call embed_documents again embeddings.embed_documents(["Third batch"]) - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings spans - should have 3 (2 embed_documents + 1 embed_query) embeddings_spans = [ - span for span in 
tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 3 # Verify all spans have proper data for span in embeddings_spans: - assert span["data"]["gen_ai.operation.name"] == "embeddings" - assert span["data"]["gen_ai.request.model"] == "text-embedding-ada-002" - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["data"] + assert span["attributes"]["gen_ai.operation.name"] == "embeddings" + assert span["attributes"]["gen_ai.request.model"] == "text-embedding-ada-002" + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["attributes"] # Verify the input data is different for each span input_data_list = [ - span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] for span in embeddings_spans + span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + for span in embeddings_spans ] # They should all be different (different inputs) assert len(set(str(data) for data in input_data_list)) == 3 -def test_langchain_embeddings_span_hierarchy(sentry_init, capture_events): +def test_langchain_embeddings_span_hierarchy(sentry_init, capture_items): """Test that embeddings spans are properly nested within parent spans.""" try: from langchain_openai import OpenAIEmbeddings @@ -1865,7 +1923,7 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API call with mock.patch.object( @@ -1884,15 +1942,15 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_events): with sentry_sdk.start_span(op="custom", name="custom operation"): embeddings.embed_documents(["Test within custom span"]) - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find all spans embeddings_spans = [ - span 
for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] + + tx = next(item.payload for item in items if item.type == "transaction") custom_spans = [span for span in tx.get("spans", []) if span.get("op") == "custom"] assert len(embeddings_spans) == 1 @@ -1902,11 +1960,11 @@ def test_langchain_embeddings_span_hierarchy(sentry_init, capture_events): embeddings_span = embeddings_spans[0] custom_span = custom_spans[0] - assert embeddings_span["data"]["gen_ai.operation.name"] == "embeddings" + assert embeddings_span["attributes"]["gen_ai.operation.name"] == "embeddings" assert custom_span["description"] == "custom operation" -def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_events): +def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_items): """Test that embeddings correctly handle both list and string inputs.""" try: from langchain_openai import OpenAIEmbeddings @@ -1918,7 +1976,7 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock the actual API calls with mock.patch.object( @@ -1943,21 +2001,19 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e # embed_query takes a string embeddings.embed_query("Single string query") - # Check captured events - assert len(events) >= 1 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] # Find embeddings spans embeddings_spans = [ - span for span in tx.get("spans", []) if span.get("op") == "gen_ai.embeddings" + span + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.embeddings" ] assert len(embeddings_spans) == 2 # Both should have input data captured as lists for span in 
embeddings_spans: - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["data"] - input_data = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT in span["attributes"] + input_data = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] # Input should be normalized to list format if isinstance(input_data, str): # If serialized, should contain the input text @@ -1975,7 +2031,7 @@ def test_langchain_embeddings_with_list_and_string_inputs(sentry_init, capture_e ) def test_langchain_response_model_extraction( sentry_init, - capture_events, + capture_items, response_metadata_model, expected_model, ): @@ -1984,7 +2040,7 @@ def test_langchain_response_model_extraction( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True) @@ -2009,25 +2065,22 @@ def test_langchain_response_model_extraction( response = Mock(generations=[[generation]]) callback.on_llm_end(response=response, run_id=run_id) - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] llm_spans = [ span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.text_completion" + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.text_completion" ] assert len(llm_spans) > 0 llm_span = llm_spans[0] - assert llm_span["data"]["gen_ai.operation.name"] == "text_completion" + assert llm_span["attributes"]["gen_ai.operation.name"] == "text_completion" if expected_model is not None: - assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["data"] - assert llm_span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == expected_model + assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["attributes"] + assert llm_span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == expected_model else: - assert SPANDATA.GEN_AI_RESPONSE_MODEL not in 
llm_span.get("data", {}) + assert SPANDATA.GEN_AI_RESPONSE_MODEL not in llm_span.get("attributes", {}) # Tests for multimodal content transformation functions @@ -2286,13 +2339,13 @@ def test_transform_google_file_data(self): ], ) def test_langchain_ai_system_detection( - sentry_init, capture_events, ai_type, expected_system + sentry_init, capture_items, ai_type, expected_system ): sentry_init( integrations=[LangchainIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") callback = SentryLangchainCallback(max_span_map_size=100, include_prompts=True) @@ -2312,23 +2365,20 @@ def test_langchain_ai_system_detection( response = Mock(generations=[[generation]]) callback.on_llm_end(response=response, run_id=run_id) - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] llm_spans = [ span - for span in tx.get("spans", []) - if span.get("op") == "gen_ai.text_completion" + for span in spans + if span["attributes"].get("sentry.op") == "gen_ai.text_completion" ] assert len(llm_spans) > 0 llm_span = llm_spans[0] if expected_system is not None: - assert llm_span["data"][SPANDATA.GEN_AI_SYSTEM] == expected_system + assert llm_span["attributes"][SPANDATA.GEN_AI_SYSTEM] == expected_system else: - assert SPANDATA.GEN_AI_SYSTEM not in llm_span.get("data", {}) + assert SPANDATA.GEN_AI_SYSTEM not in llm_span.get("attributes", {}) class TestTransformLangchainMessageContent: diff --git a/tests/integrations/langgraph/test_langgraph.py b/tests/integrations/langgraph/test_langgraph.py index 2a385d8a78..e1a3baa0a8 100644 --- a/tests/integrations/langgraph/test_langgraph.py +++ b/tests/integrations/langgraph/test_langgraph.py @@ -147,7 +147,7 @@ def test_langgraph_integration_init(): ], ) def test_state_graph_compile( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, 
include_prompts ): """Test StateGraph.compile() wrapper creates proper create_agent span.""" sentry_init( @@ -155,7 +155,7 @@ def test_state_graph_compile( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") graph = MockStateGraph() def original_compile(self, *args, **kwargs): @@ -171,21 +171,23 @@ def original_compile(self, *args, **kwargs): assert compiled_graph is not None assert compiled_graph.name == "test_graph" - tx = events[0] - assert tx["type"] == "transaction" - - agent_spans = [span for span in tx["spans"] if span["op"] == OP.GEN_AI_CREATE_AGENT] + spans = [item.payload for item in items if item.type == "span"] + agent_spans = [ + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_CREATE_AGENT + ] assert len(agent_spans) == 1 agent_span = agent_spans[0] - assert agent_span["description"] == "create_agent test_graph" - assert agent_span["origin"] == "auto.ai.langgraph" - assert agent_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "create_agent" - assert agent_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" - assert agent_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "test-model" - assert SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS in agent_span["data"] - - tools_data = agent_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + assert agent_span["name"] == "create_agent test_graph" + assert agent_span["attributes"]["sentry.origin"] == "auto.ai.langgraph" + assert agent_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "create_agent" + assert agent_span["attributes"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" + assert agent_span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "test-model" + assert SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS in agent_span["attributes"] + + tools_data = agent_span["attributes"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] assert tools_data == ["search_tool", "calculator"] assert len(tools_data) == 2 assert 
"search_tool" in tools_data @@ -201,14 +203,14 @@ def original_compile(self, *args, **kwargs): (False, False), ], ) -def test_pregel_invoke(sentry_init, capture_events, send_default_pii, include_prompts): +def test_pregel_invoke(sentry_init, capture_items, send_default_pii, include_prompts): """Test Pregel.invoke() wrapper creates proper invoke_agent span.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -245,26 +247,26 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert invoke_span["description"] == "invoke_agent test_graph" - assert invoke_span["origin"] == "auto.ai.langgraph" - assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "test_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" + assert invoke_span["name"] == "invoke_agent test_graph" + assert invoke_span["attributes"]["sentry.origin"] == "auto.ai.langgraph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" + assert invoke_span["attributes"][SPANDATA.GEN_AI_PIPELINE_NAME] == "test_graph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES 
in invoke_span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["attributes"] - request_messages = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + request_messages = invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] if isinstance(request_messages, str): import json @@ -273,11 +275,11 @@ def original_invoke(self, *args, **kwargs): assert len(request_messages) == 1 assert request_messages[0]["content"] == "Of course! How can I assist you?" - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == expected_assistant_response - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["attributes"] + tool_calls_data = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): import json @@ -287,9 +289,11 @@ def original_invoke(self, *args, **kwargs): assert tool_calls_data[0]["id"] == "call_test_123" assert tool_calls_data[0]["function"]["name"] == "search_tool" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get( + "attributes", {} + ) @pytest.mark.parametrize( @@ -301,14 +305,14 @@ def original_invoke(self, *args, **kwargs): (False, False), ], ) -def test_pregel_ainvoke(sentry_init, capture_events, send_default_pii, include_prompts): +def test_pregel_ainvoke(sentry_init, capture_items, send_default_pii, include_prompts): """Test 
Pregel.ainvoke() async wrapper creates proper invoke_agent span.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("What's the weather like?", name="user")]} pregel = MockPregelInstance("async_graph") @@ -341,30 +345,30 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert invoke_span["description"] == "invoke_agent async_graph" - assert invoke_span["origin"] == "auto.ai.langgraph" - assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "async_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "async_graph" + assert invoke_span["name"] == "invoke_agent async_graph" + assert invoke_span["attributes"]["sentry.origin"] == "auto.ai.langgraph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" + assert invoke_span["attributes"][SPANDATA.GEN_AI_PIPELINE_NAME] == "async_graph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_AGENT_NAME] == "async_graph" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["attributes"] - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + 
response_text = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == expected_assistant_response - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["attributes"] + tool_calls_data = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): import json @@ -374,19 +378,21 @@ async def run_test(): assert tool_calls_data[0]["id"] == "call_weather_456" assert tool_calls_data[0]["function"]["name"] == "get_weather" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get( + "attributes", {} + ) -def test_pregel_invoke_error(sentry_init, capture_events): +def test_pregel_invoke_error(sentry_init, capture_items): """Test error handling during graph execution.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("This will fail")]} pregel = MockPregelInstance("error_graph") @@ -397,25 +403,26 @@ def original_invoke(self, *args, **kwargs): wrapped_invoke = _wrap_pregel_invoke(original_invoke) wrapped_invoke(pregel, test_state) - tx = events[0] + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == 
OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert invoke_span.get("status") == "internal_error" - assert invoke_span.get("tags", {}).get("status") == "internal_error" + assert invoke_span.get("status") == "error" -def test_pregel_ainvoke_error(sentry_init, capture_events): +def test_pregel_ainvoke_error(sentry_init, capture_items): """Test error handling during async graph execution.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = {"messages": [MockMessage("This will fail async")]} pregel = MockPregelInstance("async_error_graph") @@ -431,24 +438,25 @@ async def run_error_test(): asyncio.run(run_error_test()) - tx = events[0] + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert invoke_span.get("status") == "internal_error" - assert invoke_span.get("tags", {}).get("status") == "internal_error" + assert invoke_span.get("status") == "error" -def test_span_origin(sentry_init, capture_events): +def test_span_origin(sentry_init, capture_items): """Test that span origins are correctly set.""" sentry_init( integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") graph = MockStateGraph() @@ -461,16 +469,17 @@ def original_compile(self, *args, **kwargs): wrapped_compile = _wrap_state_graph_compile(original_compile) wrapped_compile(graph) - tx = events[0] + tx = next(item.payload for item in items if item.type == "transaction") assert tx["contexts"]["trace"]["origin"] == "manual" - for span in tx["spans"]: - assert span["origin"] == 
"auto.ai.langgraph" + spans = [item.payload for item in items if item.type == "span"] + for span in spans: + assert span["attributes"]["sentry.origin"] == "auto.ai.langgraph" @pytest.mark.parametrize("graph_name", ["my_graph", None, ""]) def test_pregel_invoke_with_different_graph_names( - sentry_init, capture_events, graph_name + sentry_init, capture_items, graph_name ): """Test Pregel.invoke() with different graph name scenarios.""" sentry_init( @@ -478,7 +487,7 @@ def test_pregel_invoke_with_different_graph_names( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") pregel = MockPregelInstance(graph_name) if graph_name else MockPregelInstance() if not graph_name: @@ -492,25 +501,27 @@ def original_invoke(self, *args, **kwargs): wrapped_invoke = _wrap_pregel_invoke(original_invoke) wrapped_invoke(pregel, {"messages": []}) - tx = events[0] + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] if graph_name and graph_name.strip(): - assert invoke_span["description"] == "invoke_agent my_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == graph_name - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == graph_name + assert invoke_span["name"] == "invoke_agent my_graph" + assert invoke_span["attributes"][SPANDATA.GEN_AI_PIPELINE_NAME] == graph_name + assert invoke_span["attributes"][SPANDATA.GEN_AI_AGENT_NAME] == graph_name else: - assert invoke_span["description"] == "invoke_agent" - assert SPANDATA.GEN_AI_PIPELINE_NAME not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_AGENT_NAME not in invoke_span.get("data", {}) + assert invoke_span["name"] == "invoke_agent" + assert SPANDATA.GEN_AI_PIPELINE_NAME not in 
invoke_span.get("attributes", {}) + assert SPANDATA.GEN_AI_AGENT_NAME not in invoke_span.get("attributes", {}) -def test_pregel_invoke_span_includes_usage_data(sentry_init, capture_events): +def test_pregel_invoke_span_includes_usage_data(sentry_init, capture_items): """ Test that invoke_agent spans include aggregated usage data from context_wrapper. This verifies the new functionality added to track token usage in invoke_agent spans. @@ -519,7 +530,7 @@ def test_pregel_invoke_span_includes_usage_data(sentry_init, capture_events): integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -564,29 +575,29 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has usage data - assert invoke_agent_span["description"] == "invoke_agent test_graph" - assert "gen_ai.usage.input_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.output_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.total_tokens" in invoke_agent_span["data"] + assert invoke_agent_span["name"] == "invoke_agent test_graph" + assert "gen_ai.usage.input_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.output_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.total_tokens" in invoke_agent_span["attributes"] # The usage should match the mock_usage values (aggregated across all calls) - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 10 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert 
invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 30 -def test_pregel_ainvoke_span_includes_usage_data(sentry_init, capture_events): +def test_pregel_ainvoke_span_includes_usage_data(sentry_init, capture_items): """ Test that invoke_agent spans include aggregated usage data from context_wrapper. This verifies the new functionality added to track token usage in invoke_agent spans. @@ -595,7 +606,7 @@ def test_pregel_ainvoke_span_includes_usage_data(sentry_init, capture_events): integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -643,29 +654,29 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has usage data - assert invoke_agent_span["description"] == "invoke_agent test_graph" - assert "gen_ai.usage.input_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.output_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.total_tokens" in invoke_agent_span["data"] + assert invoke_agent_span["name"] == "invoke_agent test_graph" + assert "gen_ai.usage.input_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.output_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.total_tokens" in invoke_agent_span["attributes"] # The usage should match the mock_usage values 
(aggregated across all calls) - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 10 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 30 -def test_pregel_invoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_events): +def test_pregel_invoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_items): """ Test that invoke_agent spans show aggregated usage across multiple LLM calls (e.g., when tools are used and multiple API calls are made). @@ -674,7 +685,7 @@ def test_pregel_invoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_e integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -730,23 +741,23 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has aggregated usage from both API calls # Total: 10 + 20 = 30 input tokens, 5 + 15 = 20 output tokens, 15 + 35 = 50 total - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 30 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 50 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 30 + assert 
invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 50 -def test_pregel_ainvoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_events): +def test_pregel_ainvoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_items): """ Test that invoke_agent spans show aggregated usage across multiple LLM calls (e.g., when tools are used and multiple API calls are made). @@ -755,7 +766,7 @@ def test_pregel_ainvoke_multiple_llm_calls_aggregate_usage(sentry_init, capture_ integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -814,23 +825,23 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has aggregated usage from both API calls # Total: 10 + 20 = 30 input tokens, 5 + 15 = 20 output tokens, 15 + 35 = 50 total - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 30 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 50 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 50 -def test_pregel_invoke_span_includes_response_model(sentry_init, capture_events): +def test_pregel_invoke_span_includes_response_model(sentry_init, capture_items): """ Test that invoke_agent spans 
include the response model. When an agent makes multiple LLM calls, it should report the last model used. @@ -839,7 +850,7 @@ def test_pregel_invoke_span_includes_response_model(sentry_init, capture_events) integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -884,23 +895,25 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has response model - assert invoke_agent_span["description"] == "invoke_agent test_graph" - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert invoke_agent_span["name"] == "invoke_agent test_graph" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) -def test_pregel_ainvoke_span_includes_response_model(sentry_init, capture_events): +def test_pregel_ainvoke_span_includes_response_model(sentry_init, capture_items): """ Test that invoke_agent spans include the response model. When an agent makes multiple LLM calls, it should report the last model used. 
@@ -909,7 +922,7 @@ def test_pregel_ainvoke_span_includes_response_model(sentry_init, capture_events integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -957,23 +970,25 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span has response model - assert invoke_agent_span["description"] == "invoke_agent test_graph" - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert invoke_agent_span["name"] == "invoke_agent test_graph" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) -def test_pregel_invoke_span_uses_last_response_model(sentry_init, capture_events): +def test_pregel_invoke_span_uses_last_response_model(sentry_init, capture_items): """ Test that when an agent makes multiple LLM calls (e.g., with tools), the invoke_agent span reports the last response model used. 
@@ -982,7 +997,7 @@ def test_pregel_invoke_span_uses_last_response_model(sentry_init, capture_events integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -1040,22 +1055,24 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span uses the LAST response model - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) -def test_pregel_ainvoke_span_uses_last_response_model(sentry_init, capture_events): +def test_pregel_ainvoke_span_uses_last_response_model(sentry_init, capture_items): """ Test that when an agent makes multiple LLM calls (e.g., with tools), the invoke_agent span reports the last response model used. 
@@ -1064,7 +1081,7 @@ def test_pregel_ainvoke_span_uses_last_response_model(sentry_init, capture_event integrations=[LanggraphIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_state = { "messages": [ @@ -1125,19 +1142,21 @@ async def run_test(): result = asyncio.run(run_test()) assert result is not None - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_agent_span = invoke_spans[0] # Verify invoke_agent span uses the LAST response model - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) def test_complex_message_parsing(): @@ -1187,14 +1206,14 @@ def test_complex_message_parsing(): assert result[2]["function_call"]["name"] == "search" -def test_extraction_functions_complex_scenario(sentry_init, capture_events): +def test_extraction_functions_complex_scenario(sentry_init, capture_items): """Test extraction functions with complex scenarios including multiple messages and edge cases.""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") pregel = MockPregelInstance("complex_graph") test_state = {"messages": [MockMessage("Complex request", name="user")]} @@ -1235,21 +1254,23 @@ def original_invoke(self, *args, **kwargs): assert result is not None - tx = events[0] + spans = [item.payload for item in items if item.type == 
"span"] invoke_spans = [ - span for span in tx["spans"] if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["attributes"] + response_text = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] assert response_text == "Final response" - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] + assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["attributes"] import json - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + tool_calls_data = invoke_span["attributes"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): tool_calls_data = json.loads(tool_calls_data) @@ -1260,14 +1281,14 @@ def original_invoke(self, *args, **kwargs): assert tool_calls_data[1]["function"]["name"] == "calculate" -def test_langgraph_message_role_mapping(sentry_init, capture_events): +def test_langgraph_message_role_mapping(sentry_init, capture_items): """Test that Langgraph integration properly maps message roles like 'ai' to 'assistant'""" sentry_init( integrations=[LanggraphIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Mock a langgraph message with mixed roles class MockMessage: @@ -1297,17 +1318,18 @@ def __init__(self, content, message_type="human"): ) wrapped_invoke(pregel, state_data) - (event,) = events - span = event["spans"][0] + span = next(item.payload for item in items if item.type == "span") # Verify that the span was created correctly - assert span["op"] == "gen_ai.invoke_agent" + assert span["attributes"]["sentry.op"] == "gen_ai.invoke_agent" # If messages were captured, verify 
role mapping - if SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"]: + if SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"]: import json - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + ) # Find messages with specific content to verify role mapping ai_message = next( @@ -1331,7 +1353,7 @@ def __init__(self, content, message_type="human"): assert "ai" not in roles -def test_langgraph_message_truncation(sentry_init, capture_events): +def test_langgraph_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in Langgraph integration.""" import json @@ -1340,7 +1362,7 @@ def test_langgraph_message_truncation(sentry_init, capture_events): traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") large_content = ( "This is a very long message that will exceed our size limits. 
" * 1000 @@ -1365,23 +1387,25 @@ def original_invoke(self, *args, **kwargs): result = wrapped_invoke(pregel, test_state) assert result is not None - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] invoke_spans = [ - span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"].get("sentry.op") == OP.GEN_AI_INVOKE_AGENT ] assert len(invoke_spans) > 0 invoke_span = invoke_spans[0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["attributes"] - messages_data = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = invoke_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) assert isinstance(parsed_messages, list) assert len(parsed_messages) == 1 assert "small message 5" in str(parsed_messages[0]) + + (tx,) = (item.payload for item in items if item.type == "transaction") assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5 diff --git a/tests/integrations/litellm/test_litellm.py b/tests/integrations/litellm/test_litellm.py index a8df5891ce..90807744e7 100644 --- a/tests/integrations/litellm/test_litellm.py +++ b/tests/integrations/litellm/test_litellm.py @@ -142,7 +142,7 @@ def __init__( def test_nonstreaming_chat_completion( reset_litellm_executor, sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -153,7 +153,7 @@ def test_nonstreaming_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -179,37 +179,36 @@ def test_nonstreaming_chat_completion( litellm_utils.executor.shutdown(wait=True) - assert 
len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "litellm test" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["attributes"] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert 
span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 @pytest.mark.asyncio(loop_scope="session") @@ -224,7 +223,7 @@ def test_nonstreaming_chat_completion( ) async def test_async_nonstreaming_chat_completion( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -235,7 +234,7 @@ async def test_async_nonstreaming_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -262,37 +261,36 @@ async def test_async_nonstreaming_chat_completion( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + (event,) = (item.payload for item in items if item.type == "transaction") assert event["transaction"] == "litellm test" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat gpt-3.5-turbo" + assert 
span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "gpt-3.5-turbo" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "chat" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT in span["attributes"] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["attributes"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 @pytest.mark.parametrize( @@ -307,7 +305,7 @@ async def test_async_nonstreaming_chat_completion( def test_streaming_chat_completion( reset_litellm_executor, sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -319,7 +317,7 @@ def test_streaming_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") messages = [{"role": "user", "content": "Hello!"}] @@ -350,20 +348,18 @@ def test_streaming_chat_completion( streaming_handler.executor.shutdown(wait=True) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for 
x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["op"] == OP.GEN_AI_CHAT - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio(loop_scope="session") @@ -378,7 +374,7 @@ def test_streaming_chat_completion( ) async def test_async_streaming_chat_completion( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -391,7 +387,7 @@ async def test_async_streaming_chat_completion( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -425,25 +421,23 @@ async def test_async_streaming_chat_completion( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - assert len(events) == 1 - (event,) = events - - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["op"] == OP.GEN_AI_CHAT - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True def test_embeddings_create( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -459,7 +453,7 @@ def 
test_embeddings_create( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="test-key") @@ -485,32 +479,34 @@ def test_embeddings_create( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS - assert span["description"] == "embeddings text-embedding-ada-002" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-ada-002" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert span["name"] == "embeddings text-embedding-ada-002" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + assert ( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] + == "text-embedding-ada-002" + ) # Check that embeddings input is captured (it's JSON serialized) - embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + embeddings_input = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] assert json.loads(embeddings_input) == ["Hello, world!"] @pytest.mark.asyncio(loop_scope="session") async def test_async_embeddings_create( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -526,7 +522,7 @@ async def test_async_embeddings_create( 
traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="test-key") @@ -553,31 +549,33 @@ async def test_async_embeddings_create( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS - assert span["description"] == "embeddings text-embedding-ada-002" - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-ada-002" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert span["name"] == "embeddings text-embedding-ada-002" + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["attributes"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 5 + assert ( + span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] + == "text-embedding-ada-002" + ) # Check that embeddings input is captured (it's JSON serialized) - embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + embeddings_input = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] assert json.loads(embeddings_input) == ["Hello, world!"] def test_embeddings_create_with_list_input( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -588,7 +586,7 @@ def test_embeddings_create_with_list_input( traces_sample_rate=1.0, send_default_pii=True, ) - 
events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="test-key") @@ -614,22 +612,21 @@ def test_embeddings_create_with_list_input( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" # Check that list of embeddings input is captured (it's JSON serialized) - embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + embeddings_input = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] assert json.loads(embeddings_input) == [ "First text", "Second text", @@ -640,7 +637,7 @@ def test_embeddings_create_with_list_input( @pytest.mark.asyncio(loop_scope="session") async def test_async_embeddings_create_with_list_input( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -651,7 +648,7 @@ async def test_async_embeddings_create_with_list_input( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="test-key") @@ -678,22 +675,21 @@ async def test_async_embeddings_create_with_list_input( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert 
event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS - assert span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + assert span["attributes"][SPANDATA.GEN_AI_OPERATION_NAME] == "embeddings" # Check that list of embeddings input is captured (it's JSON serialized) - embeddings_input = span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] + embeddings_input = span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT] assert json.loads(embeddings_input) == [ "First text", "Second text", @@ -703,7 +699,7 @@ async def test_async_embeddings_create_with_list_input( def test_embeddings_no_pii( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -714,7 +710,7 @@ def test_embeddings_no_pii( traces_sample_rate=1.0, send_default_pii=False, # PII disabled ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="test-key") @@ -740,27 +736,26 @@ def test_embeddings_no_pii( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == 
OP.GEN_AI_EMBEDDINGS + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS # Check that embeddings input is NOT captured when PII is disabled - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["attributes"] @pytest.mark.asyncio(loop_scope="session") async def test_async_embeddings_no_pii( sentry_init, - capture_events, + capture_items, get_model_response, openai_embedding_model_response, clear_litellm_cache, @@ -771,7 +766,7 @@ async def test_async_embeddings_no_pii( traces_sample_rate=1.0, send_default_pii=False, # PII disabled ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="test-key") @@ -798,31 +793,30 @@ async def test_async_embeddings_no_pii( # Response is processed by litellm, so just check it exists assert response is not None - assert len(events) == 1 - (event,) = events - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_EMBEDDINGS and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(spans) == 1 span = spans[0] - assert span["op"] == OP.GEN_AI_EMBEDDINGS + assert span["attributes"]["sentry.op"] == OP.GEN_AI_EMBEDDINGS # Check that embeddings input is NOT captured when PII is disabled - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["attributes"] def test_exception_handling( - reset_litellm_executor, sentry_init, capture_events, get_rate_limit_model_response + reset_litellm_executor, sentry_init, capture_items, get_rate_limit_model_response ): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event") messages = [{"role": "user", 
"content": "Hello!"}] @@ -843,22 +837,24 @@ def test_exception_handling( client=client, ) - # Should have error event and transaction - assert len(events) >= 1 # Find the error event - error_events = [e for e in events if e.get("level") == "error"] + error_events = [ + item.payload + for item in items + if item.type == "event" and item.payload.get("level") == "error" + ] assert len(error_events) == 1 @pytest.mark.asyncio(loop_scope="session") async def test_async_exception_handling( - sentry_init, capture_events, get_rate_limit_model_response + sentry_init, capture_items, get_rate_limit_model_response ): sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event") messages = [{"role": "user", "content": "Hello!"}] @@ -879,17 +875,19 @@ async def test_async_exception_handling( client=client, ) - # Should have error event and transaction - assert len(events) >= 1 # Find the error event - error_events = [e for e in events if e.get("level") == "error"] + error_events = [ + item.payload + for item in items + if item.type == "event" and item.payload.get("level") == "error" + ] assert len(error_events) == 1 def test_span_origin( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -897,7 +895,7 @@ def test_span_origin( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -923,16 +921,17 @@ def test_span_origin( litellm_utils.executor.shutdown(wait=True) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.litellm" + + spans = [item.payload for item in items if item.type == "span"] + assert 
spans[0]["attributes"]["sentry.origin"] == "auto.ai.litellm" def test_multiple_providers( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, nonstreaming_anthropic_model_response, @@ -943,7 +942,7 @@ def test_multiple_providers( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction") messages = [{"role": "user", "content": "Hello!"}] @@ -1015,18 +1014,19 @@ def test_multiple_providers( litellm_utils.executor.shutdown(wait=True) + events = [item.payload for item in items if item.type == "transaction"] assert len(events) == 3 - for i in range(3): - span = events[i]["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + for span in spans: # The provider should be detected by litellm.get_llm_provider - assert SPANDATA.GEN_AI_SYSTEM in span["data"] + assert SPANDATA.GEN_AI_SYSTEM in span["attributes"] @pytest.mark.asyncio(loop_scope="session") async def test_async_multiple_providers( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, nonstreaming_anthropic_model_response, @@ -1037,7 +1037,7 @@ async def test_async_multiple_providers( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -1112,18 +1112,19 @@ async def test_async_multiple_providers( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) + events = [item.payload for item in items if item.type == "transaction"] assert len(events) == 3 - for i in range(3): - span = events[i]["spans"][0] + spans = [item.payload for item in items if item.type == "span"] + for span in spans: # The provider should be detected by litellm.get_llm_provider - assert SPANDATA.GEN_AI_SYSTEM in span["data"] + assert SPANDATA.GEN_AI_SYSTEM in 
span["attributes"] def test_additional_parameters( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1132,7 +1133,7 @@ def test_additional_parameters( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] client = OpenAI(api_key="test-key") @@ -1162,26 +1163,27 @@ def test_additional_parameters( litellm_utils.executor.shutdown(wait=True) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 @pytest.mark.asyncio(loop_scope="session") async def test_async_additional_parameters( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1190,7 +1192,7 @@ async def test_async_additional_parameters( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = 
capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] client = AsyncOpenAI(api_key="test-key") @@ -1221,26 +1223,27 @@ async def test_async_additional_parameters( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.5 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.5 def test_no_integration( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1248,7 +1251,7 @@ def test_no_integration( sentry_init( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] client = OpenAI(api_key="test-key") @@ -1273,13 +1276,12 @@ def test_no_integration( litellm_utils.executor.shutdown(wait=True) - (event,) = events - # Should still have the transaction, but no child spans since integration is off - assert event["type"] == 
"transaction" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 0 @@ -1287,7 +1289,7 @@ def test_no_integration( @pytest.mark.asyncio(loop_scope="session") async def test_async_no_integration( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1295,7 +1297,7 @@ async def test_async_no_integration( sentry_init( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] client = AsyncOpenAI(api_key="test-key") @@ -1321,24 +1323,23 @@ async def test_async_no_integration( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events - # Should still have the transaction, but no child spans since integration is off - assert event["type"] == "transaction" + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 0 -def test_response_without_usage(sentry_init, capture_events): +def test_response_without_usage(sentry_init, capture_items): """Test handling of responses without usage information.""" sentry_init( integrations=[LiteLLMIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [{"role": "user", "content": "Hello!"}] @@ -1366,12 +1367,11 @@ def test_response_without_usage(sentry_init, capture_events): datetime.now(), ) - (event,) = events - (span,) = 
event["spans"] + (span,) = (item.payload for item in items if item.type == "span") # Span should still be created even without usage info - assert span["op"] == OP.GEN_AI_CHAT - assert span["description"] == "chat gpt-3.5-turbo" + assert span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + assert span["name"] == "chat gpt-3.5-turbo" def test_integration_setup(sentry_init): @@ -1387,14 +1387,14 @@ def test_integration_setup(sentry_init): assert _failure_callback in (litellm.failure_callback or []) -def test_litellm_message_truncation(sentry_init, capture_events): +def test_litellm_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in LiteLLM integration.""" sentry_init( integrations=[LiteLLMIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") large_content = ( "This is a very long message that will exceed our size limits. " * 1000 @@ -1422,25 +1422,24 @@ def test_litellm_message_truncation(sentry_init, capture_events): datetime.now(), ) - assert len(events) > 0 - tx = events[0] - assert tx["type"] == "transaction" - + spans = [item.payload for item in items if item.type == "span"] chat_spans = [ - span for span in tx.get("spans", []) if span.get("op") == OP.GEN_AI_CHAT + span for span in spans if span["attributes"].get("sentry.op") == OP.GEN_AI_CHAT ] assert len(chat_spans) > 0 chat_span = chat_spans[0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in chat_span["attributes"] - messages_data = chat_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = chat_span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) assert isinstance(parsed_messages, list) assert len(parsed_messages) == 1 assert "small message 5" in str(parsed_messages[0]) + + tx = next(item.payload for item 
in items if item.type == "transaction") assert tx["_meta"]["spans"]["0"]["data"]["gen_ai.request.messages"][""]["len"] == 5 @@ -1452,7 +1451,7 @@ def test_litellm_message_truncation(sentry_init, capture_events): def test_binary_content_encoding_image_url( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1461,7 +1460,7 @@ def test_binary_content_encoding_image_url( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1498,15 +1497,16 @@ def test_binary_content_encoding_image_url( litellm_utils.executor.shutdown(wait=True) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) blob_item = next( ( @@ -1530,7 +1530,7 @@ def test_binary_content_encoding_image_url( @pytest.mark.asyncio(loop_scope="session") async def test_async_binary_content_encoding_image_url( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1539,7 +1539,7 @@ async def test_async_binary_content_encoding_image_url( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1577,15 +1577,16 @@ async def test_async_binary_content_encoding_image_url( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] 
chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) blob_item = next( ( @@ -1609,7 +1610,7 @@ async def test_async_binary_content_encoding_image_url( def test_binary_content_encoding_mixed_content( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1618,7 +1619,7 @@ def test_binary_content_encoding_mixed_content( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1656,15 +1657,16 @@ def test_binary_content_encoding_mixed_content( litellm_utils.executor.shutdown(wait=True) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content_items = [ item for msg in messages_data if "content" in msg for item in msg["content"] @@ -1676,7 +1678,7 @@ def test_binary_content_encoding_mixed_content( @pytest.mark.asyncio(loop_scope="session") async def test_async_binary_content_encoding_mixed_content( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ 
-1685,7 +1687,7 @@ async def test_async_binary_content_encoding_mixed_content( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1724,15 +1726,16 @@ async def test_async_binary_content_encoding_mixed_content( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) content_items = [ item for msg in messages_data if "content" in msg for item in msg["content"] @@ -1744,7 +1747,7 @@ async def test_async_binary_content_encoding_mixed_content( def test_binary_content_encoding_uri_type( reset_litellm_executor, sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1753,7 +1756,7 @@ def test_binary_content_encoding_uri_type( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1789,15 +1792,16 @@ def test_binary_content_encoding_uri_type( litellm_utils.executor.shutdown(wait=True) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = 
json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) uri_item = next( ( @@ -1816,7 +1820,7 @@ def test_binary_content_encoding_uri_type( @pytest.mark.asyncio(loop_scope="session") async def test_async_binary_content_encoding_uri_type( sentry_init, - capture_events, + capture_items, get_model_response, nonstreaming_chat_completions_model_response, ): @@ -1825,7 +1829,7 @@ async def test_async_binary_content_encoding_uri_type( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") messages = [ { @@ -1862,15 +1866,16 @@ async def test_async_binary_content_encoding_uri_type( await GLOBAL_LOGGING_WORKER.flush() await asyncio.sleep(0.5) - (event,) = events + spans = [item.payload for item in items if item.type == "span"] chat_spans = list( x - for x in event["spans"] - if x["op"] == OP.GEN_AI_CHAT and x["origin"] == "auto.ai.litellm" + for x in spans + if x["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + and x["attributes"]["sentry.origin"] == "auto.ai.litellm" ) assert len(chat_spans) == 1 span = chat_spans[0] - messages_data = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + messages_data = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) uri_item = next( ( diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index ada2e633de..4b9d629d96 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -132,14 +132,14 @@ async def __call__(self, *args, **kwargs): ], ) def test_nonstreaming_chat_completion_no_prompts( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = 
capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -163,27 +163,26 @@ def test_nonstreaming_chat_completion_no_prompts( ) assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert 
SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] + + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.parametrize( @@ -229,13 +228,13 @@ def test_nonstreaming_chat_completion_no_prompts( ), ], ) -def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, request): +def test_nonstreaming_chat_completion(sentry_init, capture_items, messages, request): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -256,30 +255,29 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, req ) assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert 
span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 param_id = request.node.callspec.id if "blocks" in param_id: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", } ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", @@ -290,12 +288,12 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, req }, ] - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "the model response" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "the model response" in span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -308,14 +306,14 @@ def test_nonstreaming_chat_completion(sentry_init, capture_events, messages, req ], ) async def test_nonstreaming_chat_completion_async_no_prompts( - sentry_init, 
capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") client.chat.completions._post = mock.AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -336,27 +334,26 @@ async def test_nonstreaming_chat_completion_async_no_prompts( response = response.choices[0].message.content assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert 
SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -404,14 +401,14 @@ async def test_nonstreaming_chat_completion_async_no_prompts( ], ) async def test_nonstreaming_chat_completion_async( - sentry_init, capture_events, messages, request + sentry_init, capture_items, messages, request ): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") client.chat.completions._post = AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -429,30 +426,29 @@ async def test_nonstreaming_chat_completion_async( response = response.choices[0].message.content assert response == "the model response" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert 
span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 param_id = request.node.callspec.id if "blocks" in param_id: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", } ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", @@ -463,12 +459,12 @@ async def test_nonstreaming_chat_completion_async( }, ] - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "the model response" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "the model response" in span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert 
span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 def tiktoken_encoding_if_installed(): @@ -491,7 +487,7 @@ def tiktoken_encoding_if_installed(): ) def test_streaming_chat_completion_no_prompts( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -507,7 +503,7 @@ def test_streaming_chat_completion_no_prompts( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -581,32 +577,31 @@ def test_streaming_chat_completion_no_prompts( ) assert response_string == "hello world" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" - - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert 
span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" + + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 7 - assert span["data"]["gen_ai.usage.total_tokens"] == 9 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 7 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 9 except ImportError: pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly @@ -617,7 +612,7 @@ def test_streaming_chat_completion_no_prompts( ) def test_streaming_chat_completion_with_usage_in_stream( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -627,7 +622,7 @@ def test_streaming_chat_completion_with_usage_in_stream( traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -684,13 +679,11 @@ def test_streaming_chat_completion_with_usage_in_stream( for _ in response_stream: pass - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.output_tokens"] == 
10 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.skipif( @@ -699,7 +692,7 @@ def test_streaming_chat_completion_with_usage_in_stream( ) def test_streaming_chat_completion_empty_content_preserves_token_usage( sentry_init, - capture_events, + capture_items, get_model_response, server_side_event_chunks, ): @@ -709,7 +702,7 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -747,13 +740,11 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( for _ in response_stream: pass - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert "gen_ai.usage.output_tokens" not in span["data"] - assert span["data"]["gen_ai.usage.total_tokens"] == 20 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert "gen_ai.usage.output_tokens" not in span["attributes"] + assert span["attributes"]["gen_ai.usage.total_tokens"] == 20 @pytest.mark.skipif( @@ -763,7 +754,7 @@ def test_streaming_chat_completion_empty_content_preserves_token_usage( @pytest.mark.asyncio async def test_streaming_chat_completion_empty_content_preserves_token_usage_async( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -774,7 +765,7 @@ async 
def test_streaming_chat_completion_empty_content_preserves_token_usage_asy traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -814,13 +805,11 @@ async def test_streaming_chat_completion_empty_content_preserves_token_usage_asy async for _ in response_stream: pass - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert "gen_ai.usage.output_tokens" not in span["data"] - assert span["data"]["gen_ai.usage.total_tokens"] == 20 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert "gen_ai.usage.output_tokens" not in span["attributes"] + assert span["attributes"]["gen_ai.usage.total_tokens"] == 20 @pytest.mark.skipif( @@ -830,7 +819,7 @@ async def test_streaming_chat_completion_empty_content_preserves_token_usage_asy @pytest.mark.asyncio async def test_streaming_chat_completion_async_with_usage_in_stream( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -841,7 +830,7 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( traces_sample_rate=1.0, send_default_pii=False, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -900,13 +889,11 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( async for _ in response_stream: pass - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert 
span["data"]["gen_ai.usage.total_tokens"] == 30 + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 # noinspection PyTypeChecker @@ -955,7 +942,7 @@ async def test_streaming_chat_completion_async_with_usage_in_stream( ) def test_streaming_chat_completion( sentry_init, - capture_events, + capture_items, messages, request, get_model_response, @@ -971,7 +958,7 @@ def test_streaming_chat_completion( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -1041,30 +1028,29 @@ def test_streaming_chat_completion( map(lambda x: x.choices[0].delta.content, response_stream) ) assert response_string == "hello world" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert 
span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 param_id = request.node.callspec.id if "blocks" in param_id: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", } ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", @@ -1075,22 +1061,22 @@ def test_streaming_chat_completion( }, ] - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "hello world" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "hello world" in span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import if "blocks" in param_id: - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 7 - assert span["data"]["gen_ai.usage.total_tokens"] == 9 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 7 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 9 else: - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 12 - assert span["data"]["gen_ai.usage.total_tokens"] == 14 + assert 
span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 12 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 14 except ImportError: pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly @@ -1107,7 +1093,7 @@ def test_streaming_chat_completion( ) async def test_streaming_chat_completion_async_no_prompts( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -1124,7 +1110,7 @@ async def test_streaming_chat_completion_async_no_prompts( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -1201,32 +1187,31 @@ async def test_streaming_chat_completion_async_no_prompts( response_string += x.choices[0].delta.content assert response_string == "hello world" - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" - - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["data"] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + 
assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" + + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in span["attributes"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 7 - assert span["data"]["gen_ai.usage.total_tokens"] == 9 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 7 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 9 except ImportError: pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly @@ -1279,7 +1264,7 @@ async def test_streaming_chat_completion_async_no_prompts( ) async def test_streaming_chat_completion_async( sentry_init, - capture_events, + capture_items, messages, request, get_model_response, @@ -1296,7 +1281,7 @@ async def test_streaming_chat_completion_async( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") @@ -1371,32 +1356,31 @@ async def test_streaming_chat_completion_async( response_string += x.choices[0].delta.content assert response_string == "hello world" - tx = 
events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True - - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 - assert span["data"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" + + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "some-model" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY] == 0.1 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY] == 0.2 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "model-id" param_id = request.node.callspec.id if "blocks" in param_id: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", } ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS]) == [ { "type": "text", "content": "You are a helpful assistant.", 
@@ -1407,28 +1391,31 @@ async def test_streaming_chat_completion_async( }, ] - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "hello world" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "hello world" in span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import if "blocks" in param_id: - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 7 - assert span["data"]["gen_ai.usage.total_tokens"] == 9 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 7 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 9 else: - assert span["data"]["gen_ai.usage.output_tokens"] == 2 - assert span["data"]["gen_ai.usage.input_tokens"] == 12 - assert span["data"]["gen_ai.usage.total_tokens"] == 14 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 2 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 12 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 14 except ImportError: pass # if tiktoken is not installed, we can't guarantee token usage will be calculated properly -def test_bad_chat_completion(sentry_init, capture_events): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - events = capture_events() +def test_bad_chat_completion(sentry_init, capture_items): + sentry_init( + integrations=[OpenAIIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("event") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock( @@ -1440,13 +1427,16 @@ def test_bad_chat_completion(sentry_init, capture_events): messages=[{"role": "system", "content": "hello"}], ) - (event,) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" -def 
test_span_status_error(sentry_init, capture_events): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - events = capture_events() +def test_span_status_error(sentry_init, capture_items): + sentry_init( + integrations=[OpenAIIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("event", "transaction", "span") with start_transaction(name="test"): client = OpenAI(api_key="z") @@ -1458,17 +1448,23 @@ def test_span_status_error(sentry_init, capture_events): model="some-model", messages=[{"role": "system", "content": "hello"}] ) - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" @pytest.mark.asyncio -async def test_bad_chat_completion_async(sentry_init, capture_events): - sentry_init(integrations=[OpenAIIntegration()], traces_sample_rate=1.0) - events = capture_events() +async def test_bad_chat_completion_async(sentry_init, capture_items): + sentry_init( + integrations=[OpenAIIntegration()], + traces_sample_rate=1.0, + ) + items = capture_items("event") client = AsyncOpenAI(api_key="z") client.chat.completions._post = AsyncMock( @@ -1479,7 +1475,7 @@ async def test_bad_chat_completion_async(sentry_init, capture_events): model="some-model", messages=[{"role": "system", "content": "hello"}] ) - (event,) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" @@ -1492,14 +1488,14 @@ async def test_bad_chat_completion_async(sentry_init, capture_events): ], ) def test_embeddings_create_no_pii( - sentry_init, 
capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") @@ -1521,17 +1517,15 @@ def test_embeddings_create_no_pii( assert len(response.data[0].embedding) == 3 - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.embeddings" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.embeddings" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["attributes"] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.parametrize( @@ -1577,13 +1571,13 @@ def test_embeddings_create_no_pii( ), ], ) -def test_embeddings_create(sentry_init, capture_events, input, request): +def test_embeddings_create(sentry_init, capture_items, input, request): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") @@ -1603,24 +1597,24 @@ def test_embeddings_create(sentry_init, capture_events, input, request): assert len(response.data[0].embedding) == 3 - tx = events[0] - assert tx["type"] == 
"transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.embeddings" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.embeddings" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" param_id = request.node.callspec.id if param_id == "string": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == ["hello"] + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + "hello" + ] elif param_id == "string_sequence" or param_id == "string_iterable": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ "First text", "Second text", "Third text", ] elif param_id == "tokens" or param_id == "token_iterable": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ 5, 8, 13, @@ -1628,13 +1622,13 @@ def test_embeddings_create(sentry_init, capture_events, input, request): 34, ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ [5, 8, 13, 21, 34], [8, 13, 21, 34, 55], ] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -1647,14 +1641,14 @@ def test_embeddings_create(sentry_init, capture_events, input, request): ], ) async def test_embeddings_create_async_no_pii( - sentry_init, capture_events, send_default_pii, 
include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") @@ -1676,17 +1670,15 @@ async def test_embeddings_create_async_no_pii( assert len(response.data[0].embedding) == 3 - tx = events[0] - assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.embeddings" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.embeddings" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" - assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["data"] + assert SPANDATA.GEN_AI_EMBEDDINGS_INPUT not in span["attributes"] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -1733,13 +1725,13 @@ async def test_embeddings_create_async_no_pii( ), ], ) -async def test_embeddings_create_async(sentry_init, capture_events, input, request): +async def test_embeddings_create_async(sentry_init, capture_items, input, request): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") @@ -1761,24 +1753,24 @@ async def test_embeddings_create_async(sentry_init, capture_events, input, reque assert len(response.data[0].embedding) == 3 - tx = events[0] - 
assert tx["type"] == "transaction" - span = tx["spans"][0] - assert span["op"] == "gen_ai.embeddings" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.embeddings" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MODEL] == "text-embedding-3-large" param_id = request.node.callspec.id if param_id == "string": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == ["hello"] + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + "hello" + ] elif param_id == "string_sequence" or param_id == "string_iterable": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ "First text", "Second text", "Third text", ] elif param_id == "tokens" or param_id == "token_iterable": - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ 5, 8, 13, @@ -1786,13 +1778,13 @@ async def test_embeddings_create_async(sentry_init, capture_events, input, reque 34, ] else: - assert json.loads(span["data"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ + assert json.loads(span["attributes"][SPANDATA.GEN_AI_EMBEDDINGS_INPUT]) == [ [5, 8, 13, 21, 34], [8, 13, 21, 34, 55], ] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.parametrize( @@ -1800,14 +1792,14 @@ async def test_embeddings_create_async(sentry_init, capture_events, input, reque [(True, True), (True, False), (False, True), (False, False)], ) def 
test_embeddings_create_raises_error( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("event") client = OpenAI(api_key="z") @@ -1818,7 +1810,7 @@ def test_embeddings_create_raises_error( with pytest.raises(OpenAIError): client.embeddings.create(input="hello", model="text-embedding-3-large") - (event,) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" @@ -1828,14 +1820,14 @@ def test_embeddings_create_raises_error( [(True, True), (True, False), (False, True), (False, False)], ) async def test_embeddings_create_raises_error_async( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): sentry_init( integrations=[OpenAIIntegration(include_prompts=include_prompts)], traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("event") client = AsyncOpenAI(api_key="z") @@ -1846,16 +1838,16 @@ async def test_embeddings_create_raises_error_async( with pytest.raises(OpenAIError): await client.embeddings.create(input="hello", model="text-embedding-3-large") - (event,) = events + (event,) = (item.payload for item in items if item.type == "event") assert event["level"] == "error" -def test_span_origin_nonstreaming_chat(sentry_init, capture_events): +def test_span_origin_nonstreaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -1865,19 +1857,20 @@ def 
test_span_origin_nonstreaming_chat(sentry_init, capture_events): model="some-model", messages=[{"role": "system", "content": "hello"}] ) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" @pytest.mark.asyncio -async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_events): +async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="z") client.chat.completions._post = AsyncMock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -1887,18 +1880,19 @@ async def test_span_origin_nonstreaming_chat_async(sentry_init, capture_events): model="some-model", messages=[{"role": "system", "content": "hello"}] ) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" -def test_span_origin_streaming_chat(sentry_init, capture_events): +def test_span_origin_streaming_chat(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="z") returned_stream = Stream(cast_to=None, response=None, client=client) @@ -1946,21 +1940,22 @@ def test_span_origin_streaming_chat(sentry_init, capture_events): "".join(map(lambda x: x.choices[0].delta.content, response_stream)) - 
(event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" @pytest.mark.asyncio async def test_span_origin_streaming_chat_async( - sentry_init, capture_events, async_iterator + sentry_init, capture_items, async_iterator ): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="z") returned_stream = AsyncStream(cast_to=None, response=None, client=client) @@ -2014,18 +2009,19 @@ async def test_span_origin_streaming_chat_async( # "".join(map(lambda x: x.choices[0].delta.content, response_stream)) - (event,) = events - + (event,) = (item.payload for item in items if item.type == "transaction") assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" -def test_span_origin_embeddings(sentry_init, capture_events): +def test_span_origin_embeddings(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="z") @@ -2043,19 +2039,20 @@ def test_span_origin_embeddings(sentry_init, capture_events): with start_transaction(name="openai tx"): client.embeddings.create(input="hello", model="text-embedding-3-large") - (event,) = events - + (event,) = [item.payload for item in items if item.type == "transaction"] assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload 
for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" @pytest.mark.asyncio -async def test_span_origin_embeddings_async(sentry_init, capture_events): +async def test_span_origin_embeddings_async(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") client = AsyncOpenAI(api_key="z") @@ -2073,10 +2070,11 @@ async def test_span_origin_embeddings_async(sentry_init, capture_events): with start_transaction(name="openai tx"): await client.embeddings.create(input="hello", model="text-embedding-3-large") - (event,) = events - + (event,) = [item.payload for item in items if item.type == "transaction"] assert event["contexts"]["trace"]["origin"] == "manual" - assert event["spans"][0]["origin"] == "auto.ai.openai" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.origin"] == "auto.ai.openai" def test_completions_token_usage_from_response(): @@ -2442,12 +2440,12 @@ def count_tokens(msg): @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") -def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): +def test_ai_client_span_responses_api_no_pii(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.responses._post = mock.Mock(return_value=EXAMPLE_RESPONSE) @@ -2462,13 +2460,10 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): top_p=0.9, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["op"] == "gen_ai.responses" - assert spans[0]["origin"] == "auto.ai.openai" - assert spans[0]["data"] == { + assert 
spans[0]["attributes"] == { "gen_ai.operation.name": "responses", "gen_ai.request.max_tokens": 100, "gen_ai.request.temperature": 0.7, @@ -2482,13 +2477,21 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): "gen_ai.usage.output_tokens": 10, "gen_ai.usage.output_tokens.reasoning": 8, "gen_ai.usage.total_tokens": 30, + "sentry.environment": "production", + "sentry.op": "gen_ai.responses", + "sentry.origin": "auto.ai.openai", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "openai tx", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert "gen_ai.system_instructions" not in spans[0]["data"] - assert "gen_ai.request.messages" not in spans[0]["data"] - assert "gen_ai.response.text" not in spans[0]["data"] + assert "gen_ai.system_instructions" not in spans[0]["attributes"] + assert "gen_ai.request.messages" not in spans[0]["attributes"] + assert "gen_ai.response.text" not in spans[0]["attributes"] @pytest.mark.parametrize( @@ -2557,14 +2560,14 @@ def test_ai_client_span_responses_api_no_pii(sentry_init, capture_events): ) @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") def test_ai_client_span_responses_api( - sentry_init, capture_events, instructions, input, request + sentry_init, capture_items, instructions, input, request ): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.responses._post = mock.Mock(return_value=EXAMPLE_RESPONSE) @@ -2579,12 +2582,9 @@ def test_ai_client_span_responses_api( top_p=0.9, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["op"] == "gen_ai.responses" - assert spans[0]["origin"] == 
"auto.ai.openai" expected_data = { "gen_ai.operation.name": "responses", @@ -2601,6 +2601,14 @@ def test_ai_client_span_responses_api( "gen_ai.usage.total_tokens": 30, "gen_ai.request.model": "gpt-4o", "gen_ai.response.text": "the model response", + "sentry.environment": "production", + "sentry.op": "gen_ai.responses", + "sentry.origin": "auto.ai.openai", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "openai tx", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -2759,17 +2767,17 @@ def test_ai_client_span_responses_api( } ) - assert spans[0]["data"] == expected_data + assert spans[0]["attributes"] == expected_data @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") -def test_error_in_responses_api(sentry_init, capture_events): +def test_error_in_responses_api(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("event", "transaction", "span") client = OpenAI(api_key="z") client.responses._post = mock.Mock( @@ -2784,15 +2792,17 @@ def test_error_in_responses_api(sentry_init, capture_events): input="How do I check if a Python object is an instance of a class?", ) - (error_event, transaction_event) = events - - assert transaction_event["type"] == "transaction" # make sure the span where the error occurred is captured - assert transaction_event["spans"][0]["op"] == "gen_ai.responses" + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.op"] == "gen_ai.responses" + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "OpenAIError" + (transaction_event,) = ( + item.payload for item in items if item.type == 
"transaction" + ) assert ( error_event["contexts"]["trace"]["trace_id"] == transaction_event["contexts"]["trace"]["trace_id"] @@ -2866,14 +2876,14 @@ def test_error_in_responses_api(sentry_init, capture_events): ], ) async def test_ai_client_span_responses_async_api( - sentry_init, capture_events, instructions, input, request + sentry_init, capture_items, instructions, input, request ): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") client.responses._post = AsyncMock(return_value=EXAMPLE_RESPONSE) @@ -2888,12 +2898,9 @@ async def test_ai_client_span_responses_async_api( top_p=0.9, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["op"] == "gen_ai.responses" - assert spans[0]["origin"] == "auto.ai.openai" expected_data = { "gen_ai.operation.name": "responses", @@ -2911,6 +2918,14 @@ async def test_ai_client_span_responses_async_api( "gen_ai.usage.output_tokens.reasoning": 8, "gen_ai.usage.total_tokens": 30, "gen_ai.response.text": "the model response", + "sentry.environment": "production", + "sentry.op": "gen_ai.responses", + "sentry.origin": "auto.ai.openai", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "openai tx", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -3069,7 +3084,7 @@ async def test_ai_client_span_responses_async_api( } ) - assert spans[0]["data"] == expected_data + assert spans[0]["attributes"] == expected_data @pytest.mark.asyncio @@ -3140,7 +3155,7 @@ async def test_ai_client_span_responses_async_api( @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") async def test_ai_client_span_streaming_responses_async_api( sentry_init, - 
capture_events, + capture_items, instructions, input, request, @@ -3153,7 +3168,7 @@ async def test_ai_client_span_streaming_responses_async_api( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -3178,11 +3193,12 @@ async def test_ai_client_span_streaming_responses_async_api( async for _ in result: pass - (transaction,) = events - spans = [span for span in transaction["spans"] if span["op"] == OP.GEN_AI_RESPONSES] + spans = [item.payload for item in items if item.type == "span"] + spans = [ + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_RESPONSES + ] assert len(spans) == 1 - assert spans[0]["origin"] == "auto.ai.openai" expected_data = { "gen_ai.operation.name": "responses", @@ -3200,6 +3216,14 @@ async def test_ai_client_span_streaming_responses_async_api( "gen_ai.usage.total_tokens": 30, "gen_ai.request.model": "gpt-4o", "gen_ai.response.text": "hello world", + "sentry.environment": "production", + "sentry.op": "gen_ai.responses", + "sentry.origin": "auto.ai.openai", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "openai tx", "thread.id": mock.ANY, "thread.name": mock.ANY, } @@ -3358,18 +3382,18 @@ async def test_ai_client_span_streaming_responses_async_api( } ) - assert spans[0]["data"] == expected_data + assert spans[0]["attributes"] == expected_data @pytest.mark.asyncio @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") -async def test_error_in_responses_async_api(sentry_init, capture_events): +async def test_error_in_responses_async_api(sentry_init, capture_items): sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("event", "transaction", 
"span") client = AsyncOpenAI(api_key="z") client.responses._post = AsyncMock( @@ -3384,15 +3408,17 @@ async def test_error_in_responses_async_api(sentry_init, capture_events): input="How do I check if a Python object is an instance of a class?", ) - (error_event, transaction_event) = events - - assert transaction_event["type"] == "transaction" # make sure the span where the error occurred is captured - assert transaction_event["spans"][0]["op"] == "gen_ai.responses" + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["attributes"]["sentry.op"] == "gen_ai.responses" + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["level"] == "error" assert error_event["exception"]["values"][0]["type"] == "OpenAIError" + (transaction_event,) = ( + item.payload for item in items if item.type == "transaction" + ) assert ( error_event["contexts"]["trace"]["trace_id"] == transaction_event["contexts"]["trace"]["trace_id"] @@ -3479,7 +3505,7 @@ async def test_error_in_responses_async_api(sentry_init, capture_events): @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") def test_streaming_responses_api( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -3494,7 +3520,7 @@ def test_streaming_responses_api( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -3525,26 +3551,25 @@ def test_streaming_responses_api( assert response_string == "hello world" - (transaction,) = events - (span,) = transaction["spans"] - assert span["op"] == "gen_ai.responses" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + 
(span,) = (item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.responses" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "response-model-id" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "response-model-id" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.asyncio @@ -3555,7 +3580,7 @@ def test_streaming_responses_api( @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") async def test_streaming_responses_api_async( sentry_init, - capture_events, + capture_items, send_default_pii, include_prompts, get_model_response, @@ -3571,7 +3596,7 @@ async def test_streaming_responses_api_async( traces_sample_rate=1.0, send_default_pii=send_default_pii, ) - events = capture_events() + items = 
capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -3600,26 +3625,25 @@ async def test_streaming_responses_api_async( assert response_string == "hello world" - (transaction,) = events - (span,) = transaction["spans"] - assert span["op"] == "gen_ai.responses" - assert span["data"][SPANDATA.GEN_AI_SYSTEM] == "openai" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 - assert span["data"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 + (span,) = (item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.responses" + assert span["attributes"][SPANDATA.GEN_AI_SYSTEM] == "openai" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MAX_TOKENS] == 100 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TEMPERATURE] == 0.7 + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_TOP_P] == 0.9 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "response-model-id" + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_MODEL] == "response-model-id" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" + assert span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' + assert span["attributes"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["attributes"] + assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["attributes"] - assert span["data"]["gen_ai.usage.input_tokens"] == 20 - assert span["data"]["gen_ai.usage.output_tokens"] == 10 - assert span["data"]["gen_ai.usage.total_tokens"] == 30 + assert span["attributes"]["gen_ai.usage.input_tokens"] == 20 + assert 
span["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert span["attributes"]["gen_ai.usage.total_tokens"] == 30 @pytest.mark.skipif( @@ -3630,12 +3654,12 @@ async def test_streaming_responses_api_async( "tools", [[], None, NOT_GIVEN, omit], ) -def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): +def test_empty_tools_in_chat_completion(sentry_init, capture_items, tools): sentry_init( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -3647,10 +3671,9 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): tools=tools, ) - (event,) = events - span = event["spans"][0] + span = next(item.payload for item in items if item.type == "span") - assert "gen_ai.request.available_tools" not in span["data"] + assert "gen_ai.request.available_tools" not in span["attributes"] # Test messages with mixed roles including "ai" that should be mapped to "assistant" @@ -3669,7 +3692,7 @@ def test_empty_tools_in_chat_completion(sentry_init, capture_events, tools): ], ) def test_openai_message_role_mapping( - sentry_init, capture_events, test_message, expected_role + sentry_init, capture_items, test_message, expected_role ): """Test that OpenAI integration properly maps message roles like 'ai' to 'assistant'""" @@ -3678,7 +3701,7 @@ def test_openai_message_role_mapping( traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -3688,28 +3711,27 @@ def test_openai_message_role_mapping( with start_transaction(name="openai tx"): client.chat.completions.create(model="test-model", messages=test_messages) # Verify that the span was created correctly - (event,) = events - span = event["spans"][0] - 
assert span["op"] == "gen_ai.chat" - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] # Parse the stored messages import json - stored_messages = json.loads(span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) + stored_messages = json.loads(span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES]) assert len(stored_messages) == 1 assert stored_messages[0]["role"] == expected_role -def test_openai_message_truncation(sentry_init, capture_events): +def test_openai_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in OpenAI integration.""" sentry_init( integrations=[OpenAIIntegration(include_prompts=True)], traces_sample_rate=1.0, send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") client = OpenAI(api_key="z") client.chat.completions._post = mock.Mock(return_value=EXAMPLE_CHAT_COMPLETION) @@ -3730,17 +3752,17 @@ def test_openai_message_truncation(sentry_init, capture_events): messages=large_messages, ) - (event,) = events - span = event["spans"][0] - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["data"] + span = next(item.payload for item in items if item.type == "span") + assert SPANDATA.GEN_AI_REQUEST_MESSAGES in span["attributes"] - messages_data = span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + messages_data = span["attributes"][SPANDATA.GEN_AI_REQUEST_MESSAGES] assert isinstance(messages_data, str) parsed_messages = json.loads(messages_data) assert isinstance(parsed_messages, list) assert len(parsed_messages) <= len(large_messages) + (event,) = (item.payload for item in items if item.type == "transaction") meta_path = event["_meta"] span_meta = meta_path["spans"]["0"]["data"] messages_meta = span_meta[SPANDATA.GEN_AI_REQUEST_MESSAGES] @@ -3749,7 +3771,7 @@ def 
test_openai_message_truncation(sentry_init, capture_events): # noinspection PyTypeChecker def test_streaming_chat_completion_ttft( - sentry_init, capture_events, get_model_response, server_side_event_chunks + sentry_init, capture_items, get_model_response, server_side_event_chunks ): """ Test that streaming chat completions capture time-to-first-token (TTFT). @@ -3758,7 +3780,7 @@ def test_streaming_chat_completion_ttft( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -3810,13 +3832,12 @@ def test_streaming_chat_completion_ttft( for _ in response_stream: pass - (tx,) = events - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" # Verify TTFT is captured - assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["data"] - ttft = span["data"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] + assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["attributes"] + ttft = span["attributes"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] assert isinstance(ttft, float) assert ttft > 0 @@ -3825,7 +3846,7 @@ def test_streaming_chat_completion_ttft( @pytest.mark.asyncio async def test_streaming_chat_completion_ttft_async( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -3837,7 +3858,7 @@ async def test_streaming_chat_completion_ttft_async( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -3891,13 +3912,12 @@ async def test_streaming_chat_completion_ttft_async( async for _ in response_stream: pass - (tx,) = events - span = tx["spans"][0] - assert span["op"] == "gen_ai.chat" + span = 
next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.chat" # Verify TTFT is captured - assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["data"] - ttft = span["data"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] + assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["attributes"] + ttft = span["attributes"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] assert isinstance(ttft, float) assert ttft > 0 @@ -3905,7 +3925,7 @@ async def test_streaming_chat_completion_ttft_async( # noinspection PyTypeChecker @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") def test_streaming_responses_api_ttft( - sentry_init, capture_events, get_model_response, server_side_event_chunks + sentry_init, capture_items, get_model_response, server_side_event_chunks ): """ Test that streaming responses API captures time-to-first-token (TTFT). @@ -3914,7 +3934,7 @@ def test_streaming_responses_api_ttft( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = OpenAI(api_key="z") returned_stream = get_model_response( @@ -3936,13 +3956,12 @@ def test_streaming_responses_api_ttft( for _ in response_stream: pass - (tx,) = events - span = tx["spans"][0] - assert span["op"] == "gen_ai.responses" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.responses" # Verify TTFT is captured - assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["data"] - ttft = span["data"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] + assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["attributes"] + ttft = span["attributes"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] assert isinstance(ttft, float) assert ttft > 0 @@ -3952,7 +3971,7 @@ def test_streaming_responses_api_ttft( @pytest.mark.skipif(SKIP_RESPONSES_TESTS, reason="Responses API not available") async 
def test_streaming_responses_api_ttft_async( sentry_init, - capture_events, + capture_items, get_model_response, async_iterator, server_side_event_chunks, @@ -3964,7 +3983,7 @@ async def test_streaming_responses_api_ttft_async( integrations=[OpenAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") client = AsyncOpenAI(api_key="z") returned_stream = get_model_response( @@ -3986,12 +4005,11 @@ async def test_streaming_responses_api_ttft_async( async for _ in response_stream: pass - (tx,) = events - span = tx["spans"][0] - assert span["op"] == "gen_ai.responses" + span = next(item.payload for item in items if item.type == "span") + assert span["attributes"]["sentry.op"] == "gen_ai.responses" # Verify TTFT is captured - assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["data"] - ttft = span["data"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] + assert SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN in span["attributes"] + ttft = span["attributes"][SPANDATA.GEN_AI_RESPONSE_TIME_TO_FIRST_TOKEN] assert isinstance(ttft, float) assert ttft > 0 diff --git a/tests/integrations/openai_agents/test_openai_agents.py b/tests/integrations/openai_agents/test_openai_agents.py index 7310e86df5..bde222274c 100644 --- a/tests/integrations/openai_agents/test_openai_agents.py +++ b/tests/integrations/openai_agents/test_openai_agents.py @@ -160,7 +160,7 @@ def test_agent_custom_model(): @pytest.mark.asyncio async def test_agent_invocation_span_no_pii( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -184,7 +184,7 @@ async def test_agent_invocation_span_no_pii( send_default_pii=False, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -193,38 +193,42 @@ async def test_agent_invocation_span_no_pii( assert result is not None assert result.final_output == 
"Hello, how can I help you?" - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" + assert invoke_agent_span["name"] == "invoke_agent test_agent" - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_agent_span["data"] - assert "gen_ai.request.messages" not in invoke_agent_span["data"] - assert "gen_ai.response.text" not in invoke_agent_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_agent_span["attributes"] + assert "gen_ai.request.messages" not in invoke_agent_span["attributes"] + assert "gen_ai.response.text" not in invoke_agent_span["attributes"] - assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" - assert invoke_agent_span["data"]["gen_ai.system"] == "openai" - assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert invoke_agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert invoke_agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert invoke_agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert invoke_agent_span["data"]["gen_ai.request.top_p"] == 1.0 + assert invoke_agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" + assert invoke_agent_span["attributes"]["gen_ai.system"] == "openai" + assert 
invoke_agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert invoke_agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert invoke_agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert invoke_agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert invoke_agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span["data"]["gen_ai.system"] == "openai" - assert ai_client_span["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span["attributes"]["gen_ai.request.top_p"] == 1.0 @pytest.mark.asyncio @@ -305,7 +309,7 @@ async def test_agent_invocation_span_no_pii( ) async def test_agent_invocation_span( sentry_init, - capture_events, + capture_items, test_agent_with_instructions, nonstreaming_responses_model_response, instructions, @@ -335,7 +339,7 @@ async def test_agent_invocation_span( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, @@ -346,28 +350,32 @@ async def test_agent_invocation_span( assert result is not None assert 
result.final_output == "Hello, how can I help you?" - (transaction,) = events - spans = transaction["spans"] - invoke_agent_span, ai_client_span = spans - + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" + spans = [item.payload for item in items if item.type == "span"] + invoke_agent_span, ai_client_span = spans + + assert invoke_agent_span["name"] == "invoke_agent test_agent" # Only first case checks "gen_ai.request.messages" until further input handling work. param_id = request.node.callspec.id if "string" in param_id and instructions is None: # type: ignore - assert "gen_ai.system_instructions" not in ai_client_span["data"] + assert "gen_ai.system_instructions" not in ai_client_span["attributes"] - assert invoke_agent_span["data"]["gen_ai.request.messages"] == safe_serialize( + assert invoke_agent_span["attributes"][ + "gen_ai.request.messages" + ] == safe_serialize( [ {"content": [{"text": "Test input", "type": "text"}], "role": "user"}, ] ) elif "string" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -376,13 +384,17 @@ async def test_agent_invocation_span( ] ) elif "blocks_no_type" in param_id and instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, ] ) elif "blocks_no_type" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == 
safe_serialize( [ { "type": "text", @@ -392,13 +404,17 @@ async def test_agent_invocation_span( ] ) elif "blocks" in param_id and instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, ] ) elif "blocks" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -408,14 +424,18 @@ async def test_agent_invocation_span( ] ) elif "parts_no_type" in param_id and instructions is None: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, {"type": "text", "content": "Be concise and clear."}, ] ) elif "parts_no_type" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -426,14 +446,18 @@ async def test_agent_invocation_span( ] ) elif instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, {"type": "text", "content": "Be concise and clear."}, ] ) else: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -445,32 +469,32 @@ async def test_agent_invocation_span( ) assert ( - invoke_agent_span["data"]["gen_ai.response.text"] 
+ invoke_agent_span["attributes"]["gen_ai.response.text"] == "Hello, how can I help you?" ) - assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" - assert invoke_agent_span["data"]["gen_ai.system"] == "openai" - assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert invoke_agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert invoke_agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert invoke_agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert invoke_agent_span["data"]["gen_ai.request.top_p"] == 1.0 + assert invoke_agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" + assert invoke_agent_span["attributes"]["gen_ai.system"] == "openai" + assert invoke_agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert invoke_agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert invoke_agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert invoke_agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert invoke_agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span["data"]["gen_ai.system"] == "openai" - assert ai_client_span["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert 
ai_client_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span["attributes"]["gen_ai.request.top_p"] == 1.0 @pytest.mark.asyncio async def test_client_span_custom_model( sentry_init, - capture_events, + capture_items, test_agent_custom_model, nonstreaming_responses_model_response, get_model_response, @@ -497,7 +521,7 @@ async def test_client_span_custom_model( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -506,17 +530,18 @@ async def test_client_span_custom_model( assert result is not None assert result.final_output == "Hello, how can I help you?" - (transaction,) = events - spans = transaction["spans"] - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) - assert ai_client_span["description"] == "chat my-custom-model" - assert ai_client_span["data"]["gen_ai.request.model"] == "my-custom-model" + assert ai_client_span["name"] == "chat my-custom-model" + assert ai_client_span["attributes"]["gen_ai.request.model"] == "my-custom-model" def test_agent_invocation_span_sync_no_pii( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -543,42 +568,46 @@ def test_agent_invocation_span_sync_no_pii( send_default_pii=False, ) - events = capture_events() + items = capture_items("span", "transaction") result = agents.Runner.run_sync(agent, "Test input", run_config=test_run_config) assert result is not None assert result.final_output == "Hello, how can I help you?" 
- (transaction,) = events - spans = transaction["spans"] - invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT - ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) - + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" - assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" - assert invoke_agent_span["data"]["gen_ai.system"] == "openai" - assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert invoke_agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert invoke_agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert invoke_agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert invoke_agent_span["data"]["gen_ai.request.top_p"] == 1.0 + spans = [item.payload for item in items if item.type == "span"] + invoke_agent_span = next( + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) + + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert invoke_agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" + assert invoke_agent_span["attributes"]["gen_ai.system"] == "openai" + assert invoke_agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert invoke_agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert invoke_agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert invoke_agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert invoke_agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 - assert ai_client_span["description"] == "chat gpt-4" - assert 
ai_client_span["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span["data"]["gen_ai.system"] == "openai" - assert ai_client_span["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span["attributes"]["gen_ai.request.top_p"] == 1.0 - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_agent_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in invoke_agent_span["attributes"] @pytest.mark.parametrize( @@ -658,7 +687,7 @@ def test_agent_invocation_span_sync_no_pii( ) def test_agent_invocation_span_sync( sentry_init, - capture_events, + capture_items, test_agent_with_instructions, nonstreaming_responses_model_response, instructions, @@ -688,7 +717,7 @@ def test_agent_invocation_span_sync( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = agents.Runner.run_sync( agent, @@ -699,36 +728,38 @@ def test_agent_invocation_span_sync( assert result is not None assert result.final_output == "Hello, how can I help you?" 
- (transaction,) = events - spans = transaction["spans"] - invoke_agent_span, ai_client_span = spans - + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" - assert invoke_agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" - assert invoke_agent_span["data"]["gen_ai.system"] == "openai" - assert invoke_agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert invoke_agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert invoke_agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert invoke_agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert invoke_agent_span["data"]["gen_ai.request.top_p"] == 1.0 - - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span["data"]["gen_ai.system"] == "openai" - assert ai_client_span["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span["data"]["gen_ai.request.top_p"] == 1.0 + spans = [item.payload for item in items if item.type == "span"] + invoke_agent_span, ai_client_span = spans + + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert invoke_agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" + assert invoke_agent_span["attributes"]["gen_ai.system"] == "openai" + assert invoke_agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert invoke_agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert invoke_agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert 
invoke_agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert invoke_agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 + + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span["attributes"]["gen_ai.request.top_p"] == 1.0 param_id = request.node.callspec.id if "string" in param_id and instructions is None: # type: ignore - assert "gen_ai.system_instructions" not in ai_client_span["data"] + assert "gen_ai.system_instructions" not in ai_client_span["attributes"] elif "string" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -737,13 +768,17 @@ def test_agent_invocation_span_sync( ] ) elif "blocks_no_type" in param_id and instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, ] ) elif "blocks_no_type" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -753,13 +788,17 @@ def test_agent_invocation_span_sync( ] ) elif "blocks" in param_id and instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert 
ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, ] ) elif "blocks" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -769,14 +808,18 @@ def test_agent_invocation_span_sync( ] ) elif "parts_no_type" in param_id and instructions is None: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, {"type": "text", "content": "Be concise and clear."}, ] ) elif "parts_no_type" in param_id: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -787,14 +830,18 @@ def test_agent_invocation_span_sync( ] ) elif instructions is None: # type: ignore - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ {"type": "text", "content": "You are a helpful assistant."}, {"type": "text", "content": "Be concise and clear."}, ] ) else: - assert ai_client_span["data"]["gen_ai.system_instructions"] == safe_serialize( + assert ai_client_span["attributes"][ + "gen_ai.system_instructions" + ] == safe_serialize( [ { "type": "text", @@ -807,7 +854,7 @@ def test_agent_invocation_span_sync( @pytest.mark.asyncio -async def test_handoff_span(sentry_init, capture_events, get_model_response): +async def test_handoff_span(sentry_init, capture_items, get_model_response): """ Test that handoff spans are created when agents hand off to other agents. 
""" @@ -910,7 +957,7 @@ async def test_handoff_span(sentry_init, capture_events, get_model_response): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") result = await agents.Runner.run( primary_agent, @@ -920,21 +967,22 @@ async def test_handoff_span(sentry_init, capture_events, get_model_response): assert result is not None - (transaction,) = events - spans = transaction["spans"] - handoff_span = next(span for span in spans if span.get("op") == OP.GEN_AI_HANDOFF) + spans = [item.payload for item in items if item.type == "span"] + handoff_span = next( + span + for span in spans + if span["attributes"].get("sentry.op") == OP.GEN_AI_HANDOFF + ) # Verify handoff span was created assert handoff_span is not None - assert ( - handoff_span["description"] == "handoff from primary_agent to secondary_agent" - ) - assert handoff_span["data"]["gen_ai.operation.name"] == "handoff" + assert handoff_span["name"] == "handoff from primary_agent to secondary_agent" + assert handoff_span["attributes"]["gen_ai.operation.name"] == "handoff" @pytest.mark.asyncio async def test_max_turns_before_handoff_span( - sentry_init, capture_events, get_model_response + sentry_init, capture_items, get_model_response ): """ Example raising agents.exceptions.AgentsException after the agent invocation span is complete. 
@@ -1038,7 +1086,7 @@ async def test_max_turns_before_handoff_span( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") with pytest.raises(MaxTurnsExceeded): await agents.Runner.run( @@ -1048,22 +1096,23 @@ async def test_max_turns_before_handoff_span( max_turns=1, ) - (error, transaction) = events - spans = transaction["spans"] - handoff_span = next(span for span in spans if span.get("op") == OP.GEN_AI_HANDOFF) + spans = [item.payload for item in items if item.type == "span"] + handoff_span = next( + span + for span in spans + if span["attributes"].get("sentry.op") == OP.GEN_AI_HANDOFF + ) # Verify handoff span was created assert handoff_span is not None - assert ( - handoff_span["description"] == "handoff from primary_agent to secondary_agent" - ) - assert handoff_span["data"]["gen_ai.operation.name"] == "handoff" + assert handoff_span["name"] == "handoff from primary_agent to secondary_agent" + assert handoff_span["attributes"]["gen_ai.operation.name"] == "handoff" @pytest.mark.asyncio async def test_tool_execution_span( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, responses_tool_call_model_responses, @@ -1135,7 +1184,7 @@ def simple_test_tool(message: str) -> str: send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") await agents.Runner.run( agent_with_tool, @@ -1143,13 +1192,24 @@ def simple_test_tool(message: str) -> str: run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] - agent_span = next(span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT) + (transaction,) = (item.payload for item in items if item.type == "transaction") + assert transaction["transaction"] == "test_agent workflow" + assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" + + spans = [item.payload for item in items if item.type == "span"] + agent_span = next( + span + for span in spans + if 
span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) ai_client_span1, ai_client_span2 = ( - span for span in spans if span["op"] == OP.GEN_AI_CHAT + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) + tool_span = next( + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_EXECUTE_TOOL ) - tool_span = next(span for span in spans if span["op"] == OP.GEN_AI_EXECUTE_TOOL) available_tool = { "name": "simple_test_tool", @@ -1189,39 +1249,36 @@ def simple_test_tool(message: str) -> str: } ) - assert transaction["transaction"] == "test_agent workflow" - assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - - assert agent_span["description"] == "invoke_agent test_agent" - assert agent_span["origin"] == "auto.ai.openai_agents" - assert agent_span["data"]["gen_ai.agent.name"] == "test_agent" - assert agent_span["data"]["gen_ai.operation.name"] == "invoke_agent" + assert agent_span["name"] == "invoke_agent test_agent" + assert agent_span["attributes"]["sentry.origin"] == "auto.ai.openai_agents" + assert agent_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert agent_span["attributes"]["gen_ai.operation.name"] == "invoke_agent" agent_span_available_tool = json.loads( - agent_span["data"]["gen_ai.request.available_tools"] + agent_span["attributes"]["gen_ai.request.available_tools"] )[0] assert all(agent_span_available_tool[k] == v for k, v in available_tool.items()) - assert agent_span["data"]["gen_ai.request.max_tokens"] == 100 - assert agent_span["data"]["gen_ai.request.model"] == "gpt-4" - assert agent_span["data"]["gen_ai.request.temperature"] == 0.7 - assert agent_span["data"]["gen_ai.request.top_p"] == 1.0 - assert agent_span["data"]["gen_ai.system"] == "openai" + assert agent_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert agent_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert agent_span["attributes"]["gen_ai.request.temperature"] == 0.7 + 
assert agent_span["attributes"]["gen_ai.request.top_p"] == 1.0 + assert agent_span["attributes"]["gen_ai.system"] == "openai" - assert ai_client_span1["description"] == "chat gpt-4" - assert ai_client_span1["data"]["gen_ai.operation.name"] == "chat" - assert ai_client_span1["data"]["gen_ai.system"] == "openai" - assert ai_client_span1["data"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span1["name"] == "chat gpt-4" + assert ai_client_span1["attributes"]["gen_ai.operation.name"] == "chat" + assert ai_client_span1["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span1["attributes"]["gen_ai.agent.name"] == "test_agent" ai_client_span1_available_tool = json.loads( - ai_client_span1["data"]["gen_ai.request.available_tools"] + ai_client_span1["attributes"]["gen_ai.request.available_tools"] )[0] assert all( ai_client_span1_available_tool[k] == v for k, v in available_tool.items() ) - assert ai_client_span1["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span1["data"]["gen_ai.request.messages"] == safe_serialize( + assert ai_client_span1["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span1["attributes"]["gen_ai.request.messages"] == safe_serialize( [ { "role": "user", @@ -1231,14 +1288,14 @@ def simple_test_tool(message: str) -> str: }, ] ) - assert ai_client_span1["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span1["data"]["gen_ai.request.temperature"] == 0.7 - assert ai_client_span1["data"]["gen_ai.request.top_p"] == 1.0 - assert ai_client_span1["data"]["gen_ai.usage.input_tokens"] == 10 - assert ai_client_span1["data"]["gen_ai.usage.input_tokens.cached"] == 0 - assert ai_client_span1["data"]["gen_ai.usage.output_tokens"] == 5 - assert ai_client_span1["data"]["gen_ai.usage.output_tokens.reasoning"] == 0 - assert ai_client_span1["data"]["gen_ai.usage.total_tokens"] == 15 + assert ai_client_span1["attributes"]["gen_ai.request.model"] == "gpt-4" + assert 
ai_client_span1["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span1["attributes"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span1["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert ai_client_span1["attributes"]["gen_ai.usage.input_tokens.cached"] == 0 + assert ai_client_span1["attributes"]["gen_ai.usage.output_tokens"] == 5 + assert ai_client_span1["attributes"]["gen_ai.usage.output_tokens.reasoning"] == 0 + assert ai_client_span1["attributes"]["gen_ai.usage.total_tokens"] == 15 tool_call = { "arguments": '{"message": "hello"}', @@ -1252,41 +1309,41 @@ def simple_test_tool(message: str) -> str: if OPENAI_VERSION >= (2, 25, 0): tool_call["namespace"] = None - assert json.loads(ai_client_span1["data"]["gen_ai.response.tool_calls"]) == [ + assert json.loads(ai_client_span1["attributes"]["gen_ai.response.tool_calls"]) == [ tool_call ] - assert tool_span["description"] == "execute_tool simple_test_tool" - assert tool_span["data"]["gen_ai.agent.name"] == "test_agent" - assert tool_span["data"]["gen_ai.operation.name"] == "execute_tool" + assert tool_span["name"] == "execute_tool simple_test_tool" + assert tool_span["attributes"]["gen_ai.agent.name"] == "test_agent" + assert tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" tool_span_available_tool = json.loads( - tool_span["data"]["gen_ai.request.available_tools"] + tool_span["attributes"]["gen_ai.request.available_tools"] )[0] assert all(tool_span_available_tool[k] == v for k, v in available_tool.items()) - assert tool_span["data"]["gen_ai.request.max_tokens"] == 100 - assert tool_span["data"]["gen_ai.request.model"] == "gpt-4" - assert tool_span["data"]["gen_ai.request.temperature"] == 0.7 - assert tool_span["data"]["gen_ai.request.top_p"] == 1.0 - assert tool_span["data"]["gen_ai.system"] == "openai" - assert tool_span["data"]["gen_ai.tool.description"] == "A simple tool" - assert tool_span["data"]["gen_ai.tool.input"] == '{"message": "hello"}' - assert 
tool_span["data"]["gen_ai.tool.name"] == "simple_test_tool" - assert tool_span["data"]["gen_ai.tool.output"] == "Tool executed with: hello" - assert ai_client_span2["description"] == "chat gpt-4" - assert ai_client_span2["data"]["gen_ai.agent.name"] == "test_agent" - assert ai_client_span2["data"]["gen_ai.operation.name"] == "chat" + assert tool_span["attributes"]["gen_ai.request.max_tokens"] == 100 + assert tool_span["attributes"]["gen_ai.request.model"] == "gpt-4" + assert tool_span["attributes"]["gen_ai.request.temperature"] == 0.7 + assert tool_span["attributes"]["gen_ai.request.top_p"] == 1.0 + assert tool_span["attributes"]["gen_ai.system"] == "openai" + assert tool_span["attributes"]["gen_ai.tool.description"] == "A simple tool" + assert tool_span["attributes"]["gen_ai.tool.input"] == '{"message": "hello"}' + assert tool_span["attributes"]["gen_ai.tool.name"] == "simple_test_tool" + assert tool_span["attributes"]["gen_ai.tool.output"] == "Tool executed with: hello" + assert ai_client_span2["name"] == "chat gpt-4" + assert ai_client_span2["attributes"]["gen_ai.agent.name"] == "test_agent" + assert ai_client_span2["attributes"]["gen_ai.operation.name"] == "chat" ai_client_span2_available_tool = json.loads( - ai_client_span2["data"]["gen_ai.request.available_tools"] + ai_client_span2["attributes"]["gen_ai.request.available_tools"] )[0] assert all( ai_client_span2_available_tool[k] == v for k, v in available_tool.items() ) - assert ai_client_span2["data"]["gen_ai.request.max_tokens"] == 100 - assert ai_client_span2["data"]["gen_ai.request.messages"] == safe_serialize( + assert ai_client_span2["attributes"]["gen_ai.request.max_tokens"] == 100 + assert ai_client_span2["attributes"]["gen_ai.request.messages"] == safe_serialize( [ { "role": "tool", @@ -1300,19 +1357,19 @@ def simple_test_tool(message: str) -> str: }, ] ) - assert ai_client_span2["data"]["gen_ai.request.model"] == "gpt-4" - assert ai_client_span2["data"]["gen_ai.request.temperature"] == 0.7 - assert 
ai_client_span2["data"]["gen_ai.request.top_p"] == 1.0 + assert ai_client_span2["attributes"]["gen_ai.request.model"] == "gpt-4" + assert ai_client_span2["attributes"]["gen_ai.request.temperature"] == 0.7 + assert ai_client_span2["attributes"]["gen_ai.request.top_p"] == 1.0 assert ( - ai_client_span2["data"]["gen_ai.response.text"] + ai_client_span2["attributes"]["gen_ai.response.text"] == "Task completed using the tool" ) - assert ai_client_span2["data"]["gen_ai.system"] == "openai" - assert ai_client_span2["data"]["gen_ai.usage.input_tokens.cached"] == 0 - assert ai_client_span2["data"]["gen_ai.usage.input_tokens"] == 15 - assert ai_client_span2["data"]["gen_ai.usage.output_tokens.reasoning"] == 0 - assert ai_client_span2["data"]["gen_ai.usage.output_tokens"] == 10 - assert ai_client_span2["data"]["gen_ai.usage.total_tokens"] == 25 + assert ai_client_span2["attributes"]["gen_ai.system"] == "openai" + assert ai_client_span2["attributes"]["gen_ai.usage.input_tokens.cached"] == 0 + assert ai_client_span2["attributes"]["gen_ai.usage.input_tokens"] == 15 + assert ai_client_span2["attributes"]["gen_ai.usage.output_tokens.reasoning"] == 0 + assert ai_client_span2["attributes"]["gen_ai.usage.output_tokens"] == 10 + assert ai_client_span2["attributes"]["gen_ai.usage.total_tokens"] == 25 @pytest.mark.asyncio @@ -1570,7 +1627,7 @@ async def test_hosted_mcp_tool_propagation_headers( @pytest.mark.asyncio -async def test_model_behavior_error(sentry_init, capture_events, test_agent): +async def test_model_behavior_error(sentry_init, capture_items, test_agent): """ Example raising agents.exceptions.AgentsException before the agent invocation span is complete. The mocked API response indicates that "wrong_tool" was called. 
@@ -1613,7 +1670,7 @@ def simple_test_tool(message: str) -> str: send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") with pytest.raises(ModelBehaviorError): await agents.Runner.run( @@ -1622,26 +1679,25 @@ def simple_test_tool(message: str) -> str: run_config=test_run_config, ) - (error, transaction) = events - spans = transaction["spans"] + (transaction,) = (item.payload for item in items if item.type == "transaction") + assert transaction["transaction"] == "test_agent workflow" + assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" + + spans = [item.payload for item in items if item.type == "span"] ( agent_span, ai_client_span1, ) = spans - assert transaction["transaction"] == "test_agent workflow" - assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - - assert agent_span["description"] == "invoke_agent test_agent" - assert agent_span["origin"] == "auto.ai.openai_agents" + assert agent_span["name"] == "invoke_agent test_agent" + assert agent_span["attributes"]["sentry.origin"] == "auto.ai.openai_agents" # Error due to unrecognized tool in model response. - assert agent_span["status"] == "internal_error" - assert agent_span["tags"]["status"] == "internal_error" + assert agent_span["status"] == "error" @pytest.mark.asyncio -async def test_error_handling(sentry_init, capture_events, test_agent): +async def test_error_handling(sentry_init, capture_items, test_agent): """ Test error handling in agent execution. 
""" @@ -1660,39 +1716,35 @@ async def test_error_handling(sentry_init, capture_events, test_agent): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "span", "transaction") with pytest.raises(Exception, match="Model Error"): await agents.Runner.run( test_agent, "Test input", run_config=test_run_config ) - ( - error_event, - transaction, - ) = events - + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["exception"]["values"][0]["type"] == "Exception" assert error_event["exception"]["values"][0]["value"] == "Model Error" assert error_event["exception"]["values"][0]["mechanism"]["type"] == "openai_agents" - spans = transaction["spans"] - (invoke_agent_span, ai_client_span) = spans - + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "test_agent workflow" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.openai_agents" - assert invoke_agent_span["description"] == "invoke_agent test_agent" - assert invoke_agent_span["origin"] == "auto.ai.openai_agents" + spans = [item.payload for item in items if item.type == "span"] + (invoke_agent_span, ai_client_span) = spans + + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert invoke_agent_span["attributes"]["sentry.origin"] == "auto.ai.openai_agents" - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["origin"] == "auto.ai.openai_agents" - assert ai_client_span["status"] == "internal_error" - assert ai_client_span["tags"]["status"] == "internal_error" + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["attributes"]["sentry.origin"] == "auto.ai.openai_agents" + assert ai_client_span["status"] == "error" @pytest.mark.asyncio -async def test_error_captures_input_data(sentry_init, capture_events, test_agent): +async def test_error_captures_input_data(sentry_init, capture_items, test_agent): """ Test that 
input data is captured even when the API call raises an exception. This verifies that _set_input_data is called before the API call. @@ -1725,37 +1777,34 @@ async def test_error_captures_input_data(sentry_init, capture_events, test_agent send_default_pii=True, ) - events = capture_events() + items = capture_items("event", "span") with pytest.raises(InternalServerError, match="Error code: 500"): await agents.Runner.run(agent, "Test input", run_config=test_run_config) - ( - error_event, - transaction, - ) = events - + (error_event,) = (item.payload for item in items if item.type == "event") assert error_event["exception"]["values"][0]["type"] == "InternalServerError" assert error_event["exception"]["values"][0]["value"] == "Error code: 500" - spans = transaction["spans"] - ai_client_span = [s for s in spans if s["op"] == "gen_ai.chat"][0] + spans = [item.payload for item in items if item.type == "span"] + ai_client_span = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ][0] - assert ai_client_span["description"] == "chat gpt-4" - assert ai_client_span["status"] == "internal_error" - assert ai_client_span["tags"]["status"] == "internal_error" + assert ai_client_span["name"] == "chat gpt-4" + assert ai_client_span["status"] == "error" - assert "gen_ai.request.messages" in ai_client_span["data"] + assert "gen_ai.request.messages" in ai_client_span["attributes"] request_messages = safe_serialize( [ {"role": "user", "content": [{"type": "text", "text": "Test input"}]}, ] ) - assert ai_client_span["data"]["gen_ai.request.messages"] == request_messages + assert ai_client_span["attributes"]["gen_ai.request.messages"] == request_messages @pytest.mark.asyncio -async def test_span_status_error(sentry_init, capture_events, test_agent): +async def test_span_status_error(sentry_init, capture_items, test_agent): with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}): with patch( "agents.models.openai_responses.OpenAIResponsesModel.get_response" 
@@ -1770,23 +1819,26 @@ async def test_span_status_error(sentry_init, capture_events, test_agent): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "transaction", "span") with pytest.raises(ValueError, match="Model Error"): await agents.Runner.run( test_agent, "Test input", run_config=test_run_config ) - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - assert transaction["spans"][0]["status"] == "internal_error" - assert transaction["spans"][0]["tags"]["status"] == "internal_error" + + spans = [item.payload for item in items if item.type == "span"] + assert spans[0]["status"] == "error" + + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["contexts"]["trace"]["status"] == "internal_error" @pytest.mark.asyncio async def test_mcp_tool_execution_spans( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that MCP (Model Context Protocol) tool calls create execute_tool spans. 
@@ -1880,7 +1932,7 @@ async def test_mcp_tool_execution_spans( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") await agents.Runner.run( agent, @@ -1888,33 +1940,35 @@ async def test_mcp_tool_execution_spans( run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find the MCP execute_tool span mcp_tool_span = None for span in spans: - if span.get("description") == "execute_tool test_mcp_tool": + if span.get("name") == "execute_tool test_mcp_tool": mcp_tool_span = span break # Verify the MCP tool span was created assert mcp_tool_span is not None, "MCP execute_tool span was not created" - assert mcp_tool_span["description"] == "execute_tool test_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.name"] == "test_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.input"] == '{"query": "search term"}' + assert mcp_tool_span["name"] == "execute_tool test_mcp_tool" + assert mcp_tool_span["attributes"]["gen_ai.tool.name"] == "test_mcp_tool" assert ( - mcp_tool_span["data"]["gen_ai.tool.output"] == "MCP tool executed successfully" + mcp_tool_span["attributes"]["gen_ai.tool.input"] == '{"query": "search term"}' + ) + assert ( + mcp_tool_span["attributes"]["gen_ai.tool.output"] + == "MCP tool executed successfully" ) # Verify no error status since error was None - assert mcp_tool_span.get("status") != "internal_error" - assert mcp_tool_span.get("tags", {}).get("status") != "internal_error" + assert mcp_tool_span.get("status") != "error" + # V2 span payloads have no "tags" field, so the status check above suffices @pytest.mark.asyncio async def test_mcp_tool_execution_with_error( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that MCP tool calls with errors are tracked with error status.
@@ -2008,7 +2062,7 @@ async def test_mcp_tool_execution_with_error( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") await agents.Runner.run( agent, @@ -2016,31 +2070,29 @@ async def test_mcp_tool_execution_with_error( run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find the MCP execute_tool span with error mcp_tool_span = None for span in spans: - if span.get("description") == "execute_tool failing_mcp_tool": + if span.get("name") == "execute_tool failing_mcp_tool": mcp_tool_span = span break # Verify the MCP tool span was created with error status assert mcp_tool_span is not None, "MCP execute_tool span was not created" - assert mcp_tool_span["description"] == "execute_tool failing_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.name"] == "failing_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.input"] == '{"query": "test"}' - assert mcp_tool_span["data"]["gen_ai.tool.output"] is None + assert mcp_tool_span["name"] == "execute_tool failing_mcp_tool" + assert mcp_tool_span["attributes"]["gen_ai.tool.name"] == "failing_mcp_tool" + assert mcp_tool_span["attributes"]["gen_ai.tool.input"] == '{"query": "test"}' + assert mcp_tool_span["attributes"]["gen_ai.tool.output"] == "None" # Verify error status was set - assert mcp_tool_span["status"] == "internal_error" - assert mcp_tool_span["tags"]["status"] == "internal_error" + assert mcp_tool_span["status"] == "error" @pytest.mark.asyncio async def test_mcp_tool_execution_without_pii( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that MCP tool input/output are not included when send_default_pii is False. 
@@ -2134,7 +2186,7 @@ async def test_mcp_tool_execution_without_pii( send_default_pii=False, # PII disabled ) - events = capture_events() + items = capture_items("span", "transaction") await agents.Runner.run( agent, @@ -2142,30 +2194,29 @@ async def test_mcp_tool_execution_without_pii( run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find the MCP execute_tool span mcp_tool_span = None for span in spans: - if span.get("description") == "execute_tool test_mcp_tool": + if span.get("name") == "execute_tool test_mcp_tool": mcp_tool_span = span break # Verify the MCP tool span was created but without input/output assert mcp_tool_span is not None, "MCP execute_tool span was not created" - assert mcp_tool_span["description"] == "execute_tool test_mcp_tool" - assert mcp_tool_span["data"]["gen_ai.tool.name"] == "test_mcp_tool" + assert mcp_tool_span["name"] == "execute_tool test_mcp_tool" + assert mcp_tool_span["attributes"]["gen_ai.tool.name"] == "test_mcp_tool" # Verify input and output are not included when send_default_pii is False - assert "gen_ai.tool.input" not in mcp_tool_span["data"] - assert "gen_ai.tool.output" not in mcp_tool_span["data"] + assert "gen_ai.tool.input" not in mcp_tool_span["attributes"] + assert "gen_ai.tool.output" not in mcp_tool_span["attributes"] @pytest.mark.asyncio async def test_multiple_agents_asyncio( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -2192,7 +2243,7 @@ async def test_multiple_agents_asyncio( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") async def run(): await agents.Runner.run( @@ -2203,14 +2254,10 @@ async def run(): await asyncio.gather(*[run() for _ in range(3)]) - assert len(events) == 3 - txn1, txn2, txn3 = events + txn1, txn2, txn3 = (item.payload for item in items if item.type == 
"transaction") - assert txn1["type"] == "transaction" assert txn1["transaction"] == "test_agent workflow" - assert txn2["type"] == "transaction" assert txn2["transaction"] == "test_agent workflow" - assert txn3["type"] == "transaction" assert txn3["transaction"] == "test_agent workflow" @@ -2230,7 +2277,7 @@ async def run(): ], ) def test_openai_agents_message_role_mapping( - sentry_init, capture_events, test_message, expected_role + sentry_init, capture_items, test_message, expected_role ): """Test that OpenAI Agents integration properly maps message roles like 'ai' to 'assistant'""" sentry_init( @@ -2259,7 +2306,7 @@ def test_openai_agents_message_role_mapping( @pytest.mark.asyncio async def test_tool_execution_error_tracing( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, responses_tool_call_model_responses, @@ -2338,7 +2385,7 @@ def failing_tool(message: str) -> str: send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") # Note: The agents library catches tool exceptions internally, # so we don't expect this to raise @@ -2348,13 +2395,12 @@ def failing_tool(message: str) -> str: run_config=test_run_config, ) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find the execute_tool span execute_tool_span = None for span in spans: - description = span.get("description", "") + description = span.get("name", "") if description is not None and description.startswith( "execute_tool failing_tool" ): @@ -2363,19 +2409,18 @@ def failing_tool(message: str) -> str: # Verify the execute_tool span was created assert execute_tool_span is not None, "execute_tool span was not created" - assert execute_tool_span["description"] == "execute_tool failing_tool" - assert execute_tool_span["data"]["gen_ai.tool.name"] == "failing_tool" + assert execute_tool_span["name"] == "execute_tool failing_tool" + assert 
execute_tool_span["attributes"]["gen_ai.tool.name"] == "failing_tool" # Verify error status was set (this is the key test for our patch) # The span should be marked as error because the tool execution failed - assert execute_tool_span["status"] == "internal_error" - assert execute_tool_span["tags"]["status"] == "internal_error" + assert execute_tool_span["status"] == "error" @pytest.mark.asyncio async def test_invoke_agent_span_includes_usage_data( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, ): @@ -2437,7 +2482,7 @@ async def test_invoke_agent_span_includes_usage_data( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -2445,29 +2490,30 @@ async def test_invoke_agent_span_includes_usage_data( assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT ) # Verify invoke_agent span has usage data from context_wrapper - assert invoke_agent_span["description"] == "invoke_agent test_agent" - assert "gen_ai.usage.input_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.output_tokens" in invoke_agent_span["data"] - assert "gen_ai.usage.total_tokens" in invoke_agent_span["data"] + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert "gen_ai.usage.input_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.output_tokens" in invoke_agent_span["attributes"] + assert "gen_ai.usage.total_tokens" in invoke_agent_span["attributes"] - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 10 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert 
invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 30 - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens.cached"] == 0 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens.reasoning"] == 5 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 10 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens.cached"] == 0 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens.reasoning"] == 5 @pytest.mark.asyncio async def test_ai_client_span_includes_response_model( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, ): @@ -2529,7 +2575,7 @@ async def test_ai_client_span_includes_response_model( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -2537,20 +2583,21 @@ async def test_ai_client_span_includes_response_model( assert result is not None - (transaction,) = events - spans = transaction["spans"] - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) # Verify ai_client span has response model from API response - assert ai_client_span["description"] == "chat gpt-4" - assert "gen_ai.response.model" in ai_client_span["data"] - assert ai_client_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert ai_client_span["name"] == "chat gpt-4" + assert "gen_ai.response.model" in ai_client_span["attributes"] + assert ai_client_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" @pytest.mark.asyncio async def test_ai_client_span_response_model_with_chat_completions( 
sentry_init, - capture_events, + capture_items, get_model_response, ): """ @@ -2616,7 +2663,7 @@ async def test_ai_client_span_response_model_with_chat_completions( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -2624,18 +2671,22 @@ async def test_ai_client_span_response_model_with_chat_completions( assert result is not None - (transaction,) = events - spans = transaction["spans"] - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) + spans = [item.payload for item in items if item.type == "span"] + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT + ) # Verify response model from API response is captured - assert "gen_ai.response.model" in ai_client_span["data"] - assert ai_client_span["data"]["gen_ai.response.model"] == "gpt-4o-mini-2024-07-18" + assert "gen_ai.response.model" in ai_client_span["attributes"] + assert ( + ai_client_span["attributes"]["gen_ai.response.model"] + == "gpt-4o-mini-2024-07-18" + ) @pytest.mark.asyncio async def test_multiple_llm_calls_aggregate_usage( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that invoke_agent spans show aggregated usage across multiple LLM calls @@ -2734,7 +2785,7 @@ def calculator(a: int, b: int) -> int: send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent_with_tool, @@ -2744,25 +2795,24 @@ def calculator(a: int, b: int) -> int: assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = spans[0] # Verify invoke_agent span has aggregated usage from both API calls # Total: 10 + 20 = 30 input tokens, 5 + 15 = 20 output 
tokens, 15 + 35 = 50 total - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens"] == 30 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens"] == 20 - assert invoke_agent_span["data"]["gen_ai.usage.total_tokens"] == 50 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens"] == 30 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens"] == 20 + assert invoke_agent_span["attributes"]["gen_ai.usage.total_tokens"] == 50 # Cached tokens should be aggregated: 0 + 5 = 5 - assert invoke_agent_span["data"]["gen_ai.usage.input_tokens.cached"] == 5 + assert invoke_agent_span["attributes"]["gen_ai.usage.input_tokens.cached"] == 5 # Reasoning tokens should be aggregated: 0 + 3 = 3 - assert invoke_agent_span["data"]["gen_ai.usage.output_tokens.reasoning"] == 3 + assert invoke_agent_span["attributes"]["gen_ai.usage.output_tokens.reasoning"] == 3 @pytest.mark.asyncio async def test_invoke_agent_span_includes_response_model( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, ): @@ -2823,7 +2873,7 @@ async def test_invoke_agent_span_includes_response_model( send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, "Test input", run_config=test_run_config @@ -2831,27 +2881,32 @@ async def test_invoke_agent_span_includes_response_model( assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) # Verify invoke_agent span has response model from API - assert 
invoke_agent_span["description"] == "invoke_agent test_agent" - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert invoke_agent_span["name"] == "invoke_agent test_agent" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) # Also verify ai_client span has it - assert "gen_ai.response.model" in ai_client_span["data"] - assert ai_client_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert "gen_ai.response.model" in ai_client_span["attributes"] + assert ai_client_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" @pytest.mark.asyncio async def test_invoke_agent_span_uses_last_response_model( sentry_init, - capture_events, + capture_items, test_agent, get_model_response, ): @@ -2952,7 +3007,7 @@ def calculator(a: int, b: int) -> int: send_default_pii=True, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent_with_tool, @@ -2962,24 +3017,26 @@ def calculator(a: int, b: int) -> int: assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = spans[0] first_ai_client_span = spans[1] second_ai_client_span = spans[3] # After tool span # Invoke_agent span uses the LAST response model - assert "gen_ai.response.model" in invoke_agent_span["data"] - assert invoke_agent_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + assert "gen_ai.response.model" in invoke_agent_span["attributes"] + assert ( + invoke_agent_span["attributes"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + ) # Each ai_client span has its own response model from the API - assert first_ai_client_span["data"]["gen_ai.response.model"] == "gpt-4-0613" + assert 
first_ai_client_span["attributes"]["gen_ai.response.model"] == "gpt-4-0613" assert ( - second_ai_client_span["data"]["gen_ai.response.model"] == "gpt-4.1-2025-04-14" + second_ai_client_span["attributes"]["gen_ai.response.model"] + == "gpt-4.1-2025-04-14" ) -def test_openai_agents_message_truncation(sentry_init, capture_events): +def test_openai_agents_message_truncation(sentry_init, capture_items): """Test that large messages are truncated properly in OpenAI Agents integration.""" large_content = ( @@ -3230,7 +3287,7 @@ async def test_streaming_ttft_on_chat_span( @pytest.mark.asyncio async def test_conversation_id_on_all_spans( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -3257,7 +3314,7 @@ async def test_conversation_id_on_all_spans( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") result = await agents.Runner.run( agent, @@ -3268,24 +3325,28 @@ async def test_conversation_id_on_all_spans( assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) # Verify workflow span (transaction) has conversation_id + (transaction,) = (item.payload for item in items if item.type == "transaction") assert ( transaction["contexts"]["trace"]["data"]["gen_ai.conversation.id"] == "conv_test_123" ) # Verify invoke_agent span has conversation_id - assert invoke_agent_span["data"]["gen_ai.conversation.id"] == "conv_test_123" + assert invoke_agent_span["attributes"]["gen_ai.conversation.id"] == 
"conv_test_123" # Verify ai_client span has conversation_id - assert ai_client_span["data"]["gen_ai.conversation.id"] == "conv_test_123" + assert ai_client_span["attributes"]["gen_ai.conversation.id"] == "conv_test_123" @pytest.mark.skipif( @@ -3294,7 +3355,7 @@ async def test_conversation_id_on_all_spans( ) @pytest.mark.asyncio async def test_conversation_id_on_tool_span( - sentry_init, capture_events, test_agent, get_model_response + sentry_init, capture_items, test_agent, get_model_response ): """ Test that gen_ai.conversation.id is set on tool execution spans when passed to Runner.run(). @@ -3391,7 +3452,7 @@ def simple_tool(message: str) -> str: traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") await agents.Runner.run( agent_with_tool, @@ -3400,21 +3461,20 @@ def simple_tool(message: str) -> str: conversation_id="conv_tool_test_456", ) - (transaction,) = events - spans = transaction["spans"] - + spans = [item.payload for item in items if item.type == "span"] # Find the tool span tool_span = None for span in spans: - if span.get("description", "").startswith("execute_tool"): + if span.get("name", "").startswith("execute_tool"): tool_span = span break assert tool_span is not None # Tool span should have the conversation_id passed to Runner.run() - assert tool_span["data"]["gen_ai.conversation.id"] == "conv_tool_test_456" + assert tool_span["attributes"]["gen_ai.conversation.id"] == "conv_tool_test_456" # Workflow span (transaction) should have the same conversation_id + (transaction,) = (item.payload for item in items if item.type == "transaction") assert ( transaction["contexts"]["trace"]["data"]["gen_ai.conversation.id"] == "conv_tool_test_456" @@ -3428,7 +3488,7 @@ def simple_tool(message: str) -> str: @pytest.mark.asyncio async def test_no_conversation_id_when_not_provided( sentry_init, - capture_events, + capture_items, test_agent, nonstreaming_responses_model_response, get_model_response, @@ -3455,7 
+3515,7 @@ async def test_no_conversation_id_when_not_provided( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("span", "transaction") # Don't pass conversation_id result = await agents.Runner.run( @@ -3464,16 +3524,21 @@ async def test_no_conversation_id_when_not_provided( assert result is not None - (transaction,) = events - spans = transaction["spans"] + (transaction,) = (item.payload for item in items if item.type == "transaction") + + spans = [item.payload for item in items if item.type == "span"] invoke_agent_span = next( - span for span in spans if span["op"] == OP.GEN_AI_INVOKE_AGENT + span + for span in spans + if span["attributes"]["sentry.op"] == OP.GEN_AI_INVOKE_AGENT + ) + ai_client_span = next( + span for span in spans if span["attributes"]["sentry.op"] == OP.GEN_AI_CHAT ) - ai_client_span = next(span for span in spans if span["op"] == OP.GEN_AI_CHAT) # Verify conversation_id is NOT set on any spans assert "gen_ai.conversation.id" not in transaction["contexts"]["trace"].get( - "data", {} + "attributes", {} ) - assert "gen_ai.conversation.id" not in invoke_agent_span.get("data", {}) - assert "gen_ai.conversation.id" not in ai_client_span.get("data", {}) + assert "gen_ai.conversation.id" not in invoke_agent_span.get("attributes", {}) + assert "gen_ai.conversation.id" not in ai_client_span.get("attributes", {}) diff --git a/tests/integrations/pydantic_ai/test_pydantic_ai.py b/tests/integrations/pydantic_ai/test_pydantic_ai.py index 50ce155f5b..cfb1ca09ca 100644 --- a/tests/integrations/pydantic_ai/test_pydantic_ai.py +++ b/tests/integrations/pydantic_ai/test_pydantic_ai.py @@ -53,7 +53,7 @@ def inner(): @pytest.mark.asyncio -async def test_agent_run_async(sentry_init, capture_events, get_test_agent): +async def test_agent_run_async(sentry_init, capture_items, get_test_agent): """ Test that the integration creates spans for async agent runs. 
""" @@ -63,7 +63,7 @@ async def test_agent_run_async(sentry_init, capture_events, get_test_agent): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() result = await test_agent.run("Test input") @@ -71,8 +71,7 @@ async def test_agent_run_async(sentry_init, capture_events, get_test_agent): assert result is not None assert result.output is not None - (transaction,) = events - spans = transaction["spans"] + (transaction,) = (item.payload for item in items if item.type == "transaction") # Verify transaction (the transaction IS the invoke_agent span) assert transaction["transaction"] == "invoke_agent test_agent" @@ -81,28 +80,31 @@ async def test_agent_run_async(sentry_init, capture_events, get_test_agent): # The transaction itself should have invoke_agent data assert transaction["contexts"]["trace"]["op"] == "gen_ai.invoke_agent" + spans = [item.payload for item in items if item.type == "span"] # Find child span types (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # Check chat span chat_span = chat_spans[0] - assert "chat" in chat_span["description"] - assert chat_span["data"]["gen_ai.operation.name"] == "chat" - assert chat_span["data"]["gen_ai.response.streaming"] is False - assert "gen_ai.request.messages" in chat_span["data"] - assert "gen_ai.usage.input_tokens" in chat_span["data"] - assert "gen_ai.usage.output_tokens" in chat_span["data"] + assert "chat" in chat_span["name"] + assert chat_span["attributes"]["gen_ai.operation.name"] == "chat" + assert chat_span["attributes"]["gen_ai.response.streaming"] is False + assert "gen_ai.request.messages" in chat_span["attributes"] + assert "gen_ai.usage.input_tokens" in chat_span["attributes"] + assert "gen_ai.usage.output_tokens" in chat_span["attributes"] 
@pytest.mark.asyncio -async def test_agent_run_async_model_error(sentry_init, capture_events): +async def test_agent_run_async_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "transaction", "span") def failing_model(messages, info): raise RuntimeError("model exploded") @@ -115,17 +117,17 @@ def failing_model(messages, info): with pytest.raises(RuntimeError, match="model exploded"): await agent.run("Test input") - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["status"] == "internal_error" + assert spans[0]["status"] == "error" @pytest.mark.asyncio -async def test_agent_run_async_usage_data(sentry_init, capture_events, get_test_agent): +async def test_agent_run_async_usage_data(sentry_init, capture_items, get_test_agent): """ Test that the invoke_agent span includes token usage and model data. 
""" @@ -135,7 +137,7 @@ async def test_agent_run_async_usage_data(sentry_init, capture_events, get_test_ send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() result = await test_agent.run("Test input") @@ -143,8 +145,7 @@ async def test_agent_run_async_usage_data(sentry_init, capture_events, get_test_ assert result is not None assert result.output is not None - (transaction,) = events - + (transaction,) = (item.payload for item in items if item.type == "transaction") # Verify transaction (the transaction IS the invoke_agent span) assert transaction["transaction"] == "invoke_agent test_agent" @@ -170,7 +171,7 @@ async def test_agent_run_async_usage_data(sentry_init, capture_events, get_test_ assert trace_data["gen_ai.response.model"] == "test" # Test model name -def test_agent_run_sync(sentry_init, capture_events, get_test_agent): +def test_agent_run_sync(sentry_init, capture_items, get_test_agent): """ Test that the integration creates spans for sync agent runs. 
""" @@ -180,7 +181,7 @@ def test_agent_run_sync(sentry_init, capture_events, get_test_agent): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() result = test_agent.run_sync("Test input") @@ -188,29 +189,31 @@ def test_agent_run_sync(sentry_init, capture_events, get_test_agent): assert result is not None assert result.output is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Verify transaction + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_agent" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.pydantic_ai" # Find span types - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # Verify streaming flag is False for sync for chat_span in chat_spans: - assert chat_span["data"]["gen_ai.response.streaming"] is False + assert chat_span["attributes"]["gen_ai.response.streaming"] is False -def test_agent_run_sync_model_error(sentry_init, capture_events): +def test_agent_run_sync_model_error(sentry_init, capture_items): sentry_init( integrations=[PydanticAIIntegration()], traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("event", "transaction", "span") def failing_model(messages, info): raise RuntimeError("model exploded") @@ -223,17 +226,17 @@ def failing_model(messages, info): with pytest.raises(RuntimeError, match="model exploded"): agent.run_sync("Test input") - (error, transaction) = events + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] assert len(spans) == 1 - assert spans[0]["status"] == 
"internal_error" + assert spans[0]["status"] == "error" @pytest.mark.asyncio -async def test_agent_run_stream(sentry_init, capture_events, get_test_agent): +async def test_agent_run_stream(sentry_init, capture_items, get_test_agent): """ Test that the integration creates spans for streaming agent runs. """ @@ -243,7 +246,7 @@ async def test_agent_run_stream(sentry_init, capture_events, get_test_agent): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() async with test_agent.run_stream("Test input") as result: @@ -251,31 +254,33 @@ async def test_agent_run_stream(sentry_init, capture_events, get_test_agent): async for _ in result.stream_output(): pass - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Verify transaction + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_agent" assert transaction["contexts"]["trace"]["origin"] == "auto.ai.pydantic_ai" # Find chat spans - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # Verify streaming flag is True for streaming for chat_span in chat_spans: - assert chat_span["data"]["gen_ai.response.streaming"] is True - assert "gen_ai.request.messages" in chat_span["data"] - assert "gen_ai.usage.input_tokens" in chat_span["data"] + assert chat_span["attributes"]["gen_ai.response.streaming"] is True + assert "gen_ai.request.messages" in chat_span["attributes"] + assert "gen_ai.usage.input_tokens" in chat_span["attributes"] # Streaming responses should still have output data assert ( - "gen_ai.response.text" in chat_span["data"] - or "gen_ai.response.model" in chat_span["data"] + "gen_ai.response.text" in chat_span["attributes"] + or "gen_ai.response.model" in 
chat_span["attributes"] ) @pytest.mark.asyncio -async def test_agent_run_stream_events(sentry_init, capture_events, get_test_agent): +async def test_agent_run_stream_events(sentry_init, capture_items, get_test_agent): """ Test that run_stream_events creates spans (it uses run internally, so non-streaming). """ @@ -285,30 +290,31 @@ async def test_agent_run_stream_events(sentry_init, capture_events, get_test_age send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Consume all events test_agent = get_test_agent() async for _ in test_agent.run_stream_events("Test input"): pass - (transaction,) = events - # Verify transaction + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_agent" # Find chat spans - spans = transaction["spans"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # run_stream_events uses run() internally, so streaming should be False for chat_span in chat_spans: - assert chat_span["data"]["gen_ai.response.streaming"] is False + assert chat_span["attributes"]["gen_ai.response.streaming"] is False @pytest.mark.asyncio -async def test_agent_with_tools(sentry_init, capture_events, get_test_agent): +async def test_agent_with_tools(sentry_init, capture_items, get_test_agent): """ Test that tool execution creates execute_tool spans. 
""" @@ -325,34 +331,39 @@ def add_numbers(a: int, b: int) -> int: """Add two numbers together.""" return a + b - events = capture_events() + items = capture_items("transaction", "span") result = await test_agent.run("What is 5 + 3?") assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child span types (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # Should have tool spans assert len(tool_spans) >= 1 # Check tool span tool_span = tool_spans[0] - assert "execute_tool" in tool_span["description"] - assert tool_span["data"]["gen_ai.operation.name"] == "execute_tool" - assert tool_span["data"]["gen_ai.tool.name"] == "add_numbers" - assert "gen_ai.tool.input" in tool_span["data"] - assert "gen_ai.tool.output" in tool_span["data"] + assert "execute_tool" in tool_span["name"] + assert tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" + assert tool_span["attributes"]["gen_ai.tool.name"] == "add_numbers" + assert "gen_ai.tool.input" in tool_span["attributes"] + assert "gen_ai.tool.output" in tool_span["attributes"] # Check chat spans have available_tools for chat_span in chat_spans: - assert "gen_ai.request.available_tools" in chat_span["data"] - available_tools_str = chat_span["data"]["gen_ai.request.available_tools"] + assert "gen_ai.request.available_tools" in chat_span["attributes"] + available_tools_str = chat_span["attributes"]["gen_ai.request.available_tools"] # Available tools is serialized as a string assert "add_numbers" in available_tools_str @@ -363,7 +374,7 @@ def add_numbers(a: int, b: int) -> int: ) 
@pytest.mark.asyncio async def test_agent_with_tool_model_retry( - sentry_init, capture_events, get_test_agent, handled_tool_call_exceptions + sentry_init, capture_items, get_test_agent, handled_tool_call_exceptions ): """ Test that a handled exception is captured when a tool raises ModelRetry. @@ -391,47 +402,51 @@ def add_numbers(a: int, b: int) -> float: raise ModelRetry(message="Try again with the same arguments.") return a + b - events = capture_events() + items = capture_items("event", "transaction", "span") result = await test_agent.run("What is 5 + 3?") assert result is not None if handled_tool_call_exceptions: - (error, transaction) = events - else: - (transaction,) = events - spans = transaction["spans"] - - if handled_tool_call_exceptions: + (error,) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" assert error["exception"]["values"][0]["mechanism"]["handled"] + spans = [item.payload for item in items if item.type == "span"] # Find child span types (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # Should have tool spans assert len(tool_spans) >= 1 # Check tool spans model_retry_tool_span = tool_spans[0] - assert "execute_tool" in model_retry_tool_span["description"] - assert model_retry_tool_span["data"]["gen_ai.operation.name"] == "execute_tool" - assert model_retry_tool_span["data"]["gen_ai.tool.name"] == "add_numbers" - assert "gen_ai.tool.input" in model_retry_tool_span["data"] + assert "execute_tool" in model_retry_tool_span["name"] + assert ( + model_retry_tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" + ) + assert 
model_retry_tool_span["attributes"]["gen_ai.tool.name"] == "add_numbers" + assert "gen_ai.tool.input" in model_retry_tool_span["attributes"] tool_span = tool_spans[1] - assert "execute_tool" in tool_span["description"] - assert tool_span["data"]["gen_ai.operation.name"] == "execute_tool" - assert tool_span["data"]["gen_ai.tool.name"] == "add_numbers" - assert "gen_ai.tool.input" in tool_span["data"] - assert "gen_ai.tool.output" in tool_span["data"] + assert "execute_tool" in tool_span["name"] + assert tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" + assert tool_span["attributes"]["gen_ai.tool.name"] == "add_numbers" + assert "gen_ai.tool.input" in tool_span["attributes"] + assert "gen_ai.tool.output" in tool_span["attributes"] # Check chat spans have available_tools for chat_span in chat_spans: - assert "gen_ai.request.available_tools" in chat_span["data"] - available_tools_str = chat_span["data"]["gen_ai.request.available_tools"] + assert "gen_ai.request.available_tools" in chat_span["attributes"] + available_tools_str = chat_span["attributes"]["gen_ai.request.available_tools"] # Available tools is serialized as a string assert "add_numbers" in available_tools_str @@ -442,7 +457,7 @@ def add_numbers(a: int, b: int) -> float: ) @pytest.mark.asyncio async def test_agent_with_tool_validation_error( - sentry_init, capture_events, get_test_agent, handled_tool_call_exceptions + sentry_init, capture_items, get_test_agent, handled_tool_call_exceptions ): """ Test that a handled exception is captured when a tool has unsatisfiable constraints. 
@@ -464,7 +479,7 @@ def add_numbers(a: Annotated[int, Field(gt=0, lt=0)], b: int) -> int: """Add two numbers together.""" return a + b - events = capture_events() + items = capture_items("event", "transaction", "span") result = None with pytest.raises(UnexpectedModelBehavior): @@ -473,42 +488,45 @@ def add_numbers(a: Annotated[int, Field(gt=0, lt=0)], b: int) -> int: assert result is None if handled_tool_call_exceptions: - (error, model_behaviour_error, transaction) = events - else: ( + error, model_behaviour_error, - transaction, - ) = events - spans = transaction["spans"] - - if handled_tool_call_exceptions: + ) = (item.payload for item in items if item.type == "event") assert error["level"] == "error" assert error["exception"]["values"][0]["mechanism"]["handled"] - # Find child span types (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # Should have tool spans assert len(tool_spans) >= 1 # Check tool spans model_retry_tool_span = tool_spans[0] - assert "execute_tool" in model_retry_tool_span["description"] - assert model_retry_tool_span["data"]["gen_ai.operation.name"] == "execute_tool" - assert model_retry_tool_span["data"]["gen_ai.tool.name"] == "add_numbers" - assert "gen_ai.tool.input" in model_retry_tool_span["data"] + assert "execute_tool" in model_retry_tool_span["name"] + assert ( + model_retry_tool_span["attributes"]["gen_ai.operation.name"] == "execute_tool" + ) + assert model_retry_tool_span["attributes"]["gen_ai.tool.name"] == "add_numbers" + assert "gen_ai.tool.input" in model_retry_tool_span["attributes"] # Check chat spans have available_tools for 
chat_span in chat_spans: - assert "gen_ai.request.available_tools" in chat_span["data"] - available_tools_str = chat_span["data"]["gen_ai.request.available_tools"] + assert "gen_ai.request.available_tools" in chat_span["attributes"] + available_tools_str = chat_span["attributes"]["gen_ai.request.available_tools"] # Available tools is serialized as a string assert "add_numbers" in available_tools_str @pytest.mark.asyncio -async def test_agent_with_tools_streaming(sentry_init, capture_events, get_test_agent): +async def test_agent_with_tools_streaming(sentry_init, capture_items, get_test_agent): """ Test that tool execution works correctly with streaming. """ @@ -525,37 +543,40 @@ def multiply(a: int, b: int) -> int: """Multiply two numbers.""" return a * b - events = capture_events() + items = capture_items("transaction", "span") async with test_agent.run_stream("What is 7 times 8?") as result: async for _ in result.stream_output(): pass - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find span types - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # Should have tool spans assert len(tool_spans) >= 1 # Verify streaming flag is True for chat_span in chat_spans: - assert chat_span["data"]["gen_ai.response.streaming"] is True + assert chat_span["attributes"]["gen_ai.response.streaming"] is True # Check tool span tool_span = tool_spans[0] - assert tool_span["data"]["gen_ai.tool.name"] == "multiply" - assert "gen_ai.tool.input" in tool_span["data"] - assert "gen_ai.tool.output" in tool_span["data"] + assert tool_span["attributes"]["gen_ai.tool.name"] == "multiply" + assert "gen_ai.tool.input" in 
tool_span["attributes"] + assert "gen_ai.tool.output" in tool_span["attributes"] @pytest.mark.asyncio -async def test_model_settings( - sentry_init, capture_events, get_test_agent_with_settings -): +async def test_model_settings(sentry_init, capture_items, get_test_agent_with_settings): """ Test that model settings are captured in spans. """ @@ -564,23 +585,24 @@ async def test_model_settings( traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent_with_settings = get_test_agent_with_settings() await test_agent_with_settings.run("Test input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find chat span - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] # Check that model settings are captured - assert chat_span["data"].get("gen_ai.request.temperature") == 0.7 - assert chat_span["data"].get("gen_ai.request.max_tokens") == 100 - assert chat_span["data"].get("gen_ai.request.top_p") == 0.9 + assert chat_span["attributes"].get("gen_ai.request.temperature") == 0.7 + assert chat_span["attributes"].get("gen_ai.request.max_tokens") == 100 + assert chat_span["attributes"].get("gen_ai.request.top_p") == 0.9 @pytest.mark.asyncio @@ -594,7 +616,7 @@ async def test_model_settings( ], ) async def test_system_prompt_attribute( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """ Test that system prompts are included as the first message. 
@@ -611,21 +633,24 @@ async def test_system_prompt_attribute( send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") await agent.run("Hello") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # The transaction IS the invoke_agent span, check for messages in chat spans instead - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] if send_default_pii and include_prompts: - system_instructions = chat_span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + system_instructions = chat_span["attributes"][ + SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS + ] assert json.loads(system_instructions) == [ { "type": "text", @@ -633,11 +658,11 @@ async def test_system_prompt_attribute( } ] else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_span["attributes"] @pytest.mark.asyncio -async def test_error_handling(sentry_init, capture_events): +async def test_error_handling(sentry_init, capture_items): """ Test error handling in agent execution. 
""" @@ -653,14 +678,13 @@ async def test_error_handling(sentry_init, capture_events): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") # Simple run that should succeed await agent.run("Hello") # At minimum, we should have a transaction - assert len(events) >= 1 - transaction = [e for e in events if e.get("type") == "transaction"][0] + transaction = next(item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_error" # Transaction should complete successfully (status key may not exist if no error) trace_status = transaction["contexts"]["trace"].get("status") @@ -668,7 +692,7 @@ async def test_error_handling(sentry_init, capture_events): @pytest.mark.asyncio -async def test_without_pii(sentry_init, capture_events, get_test_agent): +async def test_without_pii(sentry_init, capture_items, get_test_agent): """ Test that PII is not captured when send_default_pii is False. """ @@ -678,25 +702,26 @@ async def test_without_pii(sentry_init, capture_events, get_test_agent): send_default_pii=False, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Sensitive input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child spans (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Verify that messages and response text are not captured for span in chat_spans: - assert "gen_ai.request.messages" not in span["data"] - assert "gen_ai.response.text" not in span["data"] + assert "gen_ai.request.messages" not in span["attributes"] + assert "gen_ai.response.text" not in span["attributes"] @pytest.mark.asyncio -async def test_without_pii_tools(sentry_init, 
capture_events, get_test_agent): +async def test_without_pii_tools(sentry_init, capture_items, get_test_agent): """ Test that tool input/output are not captured when send_default_pii is False. """ @@ -713,24 +738,27 @@ def sensitive_tool(data: str) -> str: """A tool with sensitive data.""" return f"Processed: {data}" - events = capture_events() + items = capture_items("transaction", "span") await test_agent.run("Use sensitive tool with private data") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find tool spans - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # If tool was executed, verify input/output are not captured for tool_span in tool_spans: - assert "gen_ai.tool.input" not in tool_span["data"] - assert "gen_ai.tool.output" not in tool_span["data"] + assert "gen_ai.tool.input" not in tool_span["attributes"] + assert "gen_ai.tool.output" not in tool_span["attributes"] @pytest.mark.asyncio -async def test_multiple_agents_concurrent(sentry_init, capture_events, get_test_agent): +async def test_multiple_agents_concurrent(sentry_init, capture_items, get_test_agent): """ Test that multiple agents can run concurrently without interfering. 
""" @@ -739,7 +767,7 @@ async def test_multiple_agents_concurrent(sentry_init, capture_events, get_test_ traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() @@ -750,18 +778,16 @@ async def run_agent(input_text): results = await asyncio.gather(*[run_agent(f"Input {i}") for i in range(3)]) assert len(results) == 3 - assert len(events) == 3 # Verify each transaction is separate + events = [item.payload for item in items if item.type == "transaction"] + assert len(events) == 3 for i, transaction in enumerate(events): - assert transaction["type"] == "transaction" assert transaction["transaction"] == "invoke_agent test_agent" - # Each should have its own spans - assert len(transaction["spans"]) >= 1 @pytest.mark.asyncio -async def test_message_history(sentry_init, capture_events): +async def test_message_history(sentry_init, capture_items): """ Test that full conversation history is captured in chat spans. """ @@ -776,7 +802,7 @@ async def test_message_history(sentry_init, capture_events): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # First message await agent.run("Hello, I'm Alice") @@ -797,23 +823,26 @@ async def test_message_history(sentry_init, capture_events): await agent.run("What is my name?", message_history=history) # We should have 2 transactions + events = [item.payload for item in items if item.type == "transaction"] assert len(events) >= 2 # Check the second transaction has the full history second_transaction = events[1] spans = second_transaction["spans"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] if chat_spans: chat_span = chat_spans[0] - if "gen_ai.request.messages" in chat_span["data"]: - messages_data = chat_span["data"]["gen_ai.request.messages"] + if "gen_ai.request.messages" in chat_span["attributes"]: + 
messages_data = chat_span["attributes"]["gen_ai.request.messages"] # Should have multiple messages including history assert len(messages_data) > 1 @pytest.mark.asyncio -async def test_gen_ai_system(sentry_init, capture_events, get_test_agent): +async def test_gen_ai_system(sentry_init, capture_items, get_test_agent): """ Test that gen_ai.system is set from the model. """ @@ -822,26 +851,27 @@ async def test_gen_ai_system(sentry_init, capture_events, get_test_agent): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Test input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find chat span - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] # gen_ai.system should be set from the model (TestModel -> 'test') - assert "gen_ai.system" in chat_span["data"] - assert chat_span["data"]["gen_ai.system"] == "test" + assert "gen_ai.system" in chat_span["attributes"] + assert chat_span["attributes"]["gen_ai.system"] == "test" @pytest.mark.asyncio -async def test_include_prompts_false(sentry_init, capture_events, get_test_agent): +async def test_include_prompts_false(sentry_init, capture_items, get_test_agent): """ Test that prompts are not captured when include_prompts=False. 
""" @@ -851,25 +881,26 @@ async def test_include_prompts_false(sentry_init, capture_events, get_test_agent send_default_pii=True, # Even with PII enabled, prompts should not be captured ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Sensitive prompt") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child spans (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Verify that messages and response text are not captured for span in chat_spans: - assert "gen_ai.request.messages" not in span["data"] - assert "gen_ai.response.text" not in span["data"] + assert "gen_ai.request.messages" not in span["attributes"] + assert "gen_ai.response.text" not in span["attributes"] @pytest.mark.asyncio -async def test_include_prompts_true(sentry_init, capture_events, get_test_agent): +async def test_include_prompts_true(sentry_init, capture_items, get_test_agent): """ Test that prompts are captured when include_prompts=True (default). 
""" @@ -879,26 +910,27 @@ async def test_include_prompts_true(sentry_init, capture_events, get_test_agent) send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Test prompt") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child spans (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Verify that messages are captured in chat spans assert len(chat_spans) >= 1 for chat_span in chat_spans: - assert "gen_ai.request.messages" in chat_span["data"] + assert "gen_ai.request.messages" in chat_span["attributes"] @pytest.mark.asyncio async def test_include_prompts_false_with_tools( - sentry_init, capture_events, get_test_agent + sentry_init, capture_items, get_test_agent ): """ Test that tool input/output are not captured when include_prompts=False. 
@@ -916,26 +948,27 @@ def test_tool(value: int) -> int: """A test tool.""" return value * 2 - events = capture_events() + items = capture_items("transaction", "span") await test_agent.run("Use the test tool with value 5") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find tool spans - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] # If tool was executed, verify input/output are not captured for tool_span in tool_spans: - assert "gen_ai.tool.input" not in tool_span["data"] - assert "gen_ai.tool.output" not in tool_span["data"] + assert "gen_ai.tool.input" not in tool_span["attributes"] + assert "gen_ai.tool.output" not in tool_span["attributes"] @pytest.mark.asyncio -async def test_include_prompts_requires_pii( - sentry_init, capture_events, get_test_agent -): +async def test_include_prompts_requires_pii(sentry_init, capture_items, get_test_agent): """ Test that include_prompts requires send_default_pii=True. 
""" @@ -945,25 +978,26 @@ async def test_include_prompts_requires_pii( send_default_pii=False, # PII disabled ) - events = capture_events() + items = capture_items("transaction", "span") test_agent = get_test_agent() await test_agent.run("Test prompt") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # Find child spans (invoke_agent is the transaction, not a child span) - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Even with include_prompts=True, if PII is disabled, messages should not be captured for span in chat_spans: - assert "gen_ai.request.messages" not in span["data"] - assert "gen_ai.response.text" not in span["data"] + assert "gen_ai.request.messages" not in span["attributes"] + assert "gen_ai.response.text" not in span["attributes"] @pytest.mark.asyncio -async def test_mcp_tool_execution_spans(sentry_init, capture_events): +async def test_mcp_tool_execution_spans(sentry_init, capture_items): """ Test that MCP (Model Context Protocol) tool calls create execute_tool spans. 
@@ -1035,12 +1069,10 @@ async def mock_map_tool_result_part(part): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Simulate MCP tool execution within a transaction through CombinedToolset - with sentry_sdk.start_transaction( - op="ai.run", name="invoke_agent test_mcp_agent" - ) as transaction: + with sentry_sdk.start_transaction(op="ai.run", name="invoke_agent test_mcp_agent"): # Set up the agent context scope = sentry_sdk.get_current_scope() scope._contexts["pydantic_ai_agent"] = { @@ -1080,13 +1112,10 @@ async def mock_map_tool_result_part(part): # MCP tool might raise if not fully mocked, that's okay pass - events_list = events + events_list = items if len(events_list) == 0: pytest.skip("No events captured, MCP test setup incomplete") - (transaction,) = events_list - transaction["spans"] - # Note: This test manually calls combined.call_tool which doesn't go through # ToolManager._call_tool (which is what the integration patches). # In real-world usage, MCP tools are called through agent.run() which uses ToolManager. @@ -1256,7 +1285,7 @@ async def run_and_check_context(agent, agent_name): @pytest.mark.asyncio -async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_events): +async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_items): """ Test that invoke_agent span handles list user prompts correctly. 
""" @@ -1271,15 +1300,14 @@ async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_events): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Use a list as user prompt await agent.run(["First part", "Second part"]) - (transaction,) = events - # Check that the invoke_agent transaction has messages data # The invoke_agent is the transaction itself + (transaction,) = [item.payload for item in items if item.type == "transaction"] if "gen_ai.request.messages" in transaction["contexts"]["trace"]["data"]: messages_str = transaction["contexts"]["trace"]["data"][ "gen_ai.request.messages" @@ -1299,7 +1327,7 @@ async def test_invoke_agent_with_list_user_prompt(sentry_init, capture_events): ], ) async def test_invoke_agent_with_instructions( - sentry_init, capture_events, send_default_pii, include_prompts + sentry_init, capture_items, send_default_pii, include_prompts ): """ Test that invoke_agent span handles instructions correctly. 
@@ -1322,31 +1350,34 @@ async def test_invoke_agent_with_instructions( send_default_pii=send_default_pii, ) - events = capture_events() + items = capture_items("transaction", "span") await agent.run("Test input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] # The transaction IS the invoke_agent span, check for messages in chat spans instead - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] if send_default_pii and include_prompts: - system_instructions = chat_span["data"][SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS] + system_instructions = chat_span["attributes"][ + SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS + ] assert json.loads(system_instructions) == [ {"type": "text", "content": "System prompt"}, {"type": "text", "content": "Instruction 1\nInstruction 2"}, ] else: - assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_span["data"] + assert SPANDATA.GEN_AI_SYSTEM_INSTRUCTIONS not in chat_span["attributes"] @pytest.mark.asyncio -async def test_model_name_extraction_with_callable(sentry_init, capture_events): +async def test_model_name_extraction_with_callable(sentry_init, capture_items): """ Test model name extraction when model has a callable name() method. """ @@ -1372,7 +1403,7 @@ async def test_model_name_extraction_with_callable(sentry_init, capture_events): @pytest.mark.asyncio -async def test_model_name_extraction_fallback_to_str(sentry_init, capture_events): +async def test_model_name_extraction_fallback_to_str(sentry_init, capture_items): """ Test model name extraction falls back to str() when no name attribute exists. 
""" @@ -1399,7 +1430,7 @@ async def test_model_name_extraction_fallback_to_str(sentry_init, capture_events @pytest.mark.asyncio -async def test_model_settings_object_style(sentry_init, capture_events): +async def test_model_settings_object_style(sentry_init, capture_items): """ Test that object-style model settings (non-dict) are handled correctly. """ @@ -1433,7 +1464,7 @@ async def test_model_settings_object_style(sentry_init, capture_events): @pytest.mark.asyncio -async def test_usage_data_partial(sentry_init, capture_events): +async def test_usage_data_partial(sentry_init, capture_items): """ Test that usage data is correctly handled when only some fields are present. """ @@ -1447,14 +1478,15 @@ async def test_usage_data_partial(sentry_init, capture_events): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") await agent.run("Test input") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 # Check that usage data fields exist (they may or may not be set depending on TestModel) @@ -1464,7 +1496,7 @@ async def test_usage_data_partial(sentry_init, capture_events): @pytest.mark.asyncio -async def test_agent_data_from_scope(sentry_init, capture_events): +async def test_agent_data_from_scope(sentry_init, capture_items): """ Test that agent data can be retrieved from Sentry scope when not passed directly. 
""" @@ -1479,20 +1511,19 @@ async def test_agent_data_from_scope(sentry_init, capture_events): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") # The integration automatically sets agent in scope during execution await agent.run("Test input") - (transaction,) = events - - # Verify agent name is captured + # Verify agent name is capture + (transaction,) = (item.payload for item in items if item.type == "transaction") assert transaction["transaction"] == "invoke_agent test_scope_agent" @pytest.mark.asyncio async def test_available_tools_without_description( - sentry_init, capture_events, get_test_agent + sentry_init, capture_items, get_test_agent ): """ Test that available tools are captured even when description is missing. @@ -1509,23 +1540,24 @@ def tool_without_desc(x: int) -> int: # No docstring = no description return x * 2 - events = capture_events() + items = capture_items("transaction", "span") await test_agent.run("Use the tool with 5") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] if chat_spans: chat_span = chat_spans[0] - if "gen_ai.request.available_tools" in chat_span["data"]: - tools_str = chat_span["data"]["gen_ai.request.available_tools"] + if "gen_ai.request.available_tools" in chat_span["attributes"]: + tools_str = chat_span["attributes"]["gen_ai.request.available_tools"] assert "tool_without_desc" in tools_str @pytest.mark.asyncio -async def test_output_with_tool_calls(sentry_init, capture_events, get_test_agent): +async def test_output_with_tool_calls(sentry_init, capture_items, get_test_agent): """ Test that tool calls in model response are captured correctly. 
""" @@ -1542,14 +1574,15 @@ def calc_tool(value: int) -> int: """Calculate something.""" return value + 10 - events = capture_events() + items = capture_items("transaction", "span") await test_agent.run("Use calc_tool with 5") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # At least one chat span should exist assert len(chat_spans) >= 1 @@ -1558,11 +1591,11 @@ def calc_tool(value: int) -> int: for chat_span in chat_spans: # Tool calls may or may not be in response depending on TestModel behavior # Just verify the span was created and has basic data - assert "gen_ai.operation.name" in chat_span["data"] + assert "gen_ai.operation.name" in chat_span["attributes"] @pytest.mark.asyncio -async def test_message_formatting_with_different_parts(sentry_init, capture_events): +async def test_message_formatting_with_different_parts(sentry_init, capture_items): """ Test that different message part types are handled correctly in ai_client span. 
""" @@ -1579,7 +1612,7 @@ async def test_message_formatting_with_different_parts(sentry_init, capture_even send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Create message history with different part types history = [ @@ -1594,24 +1627,25 @@ async def test_message_formatting_with_different_parts(sentry_init, capture_even await agent.run("What did I say?", message_history=history) - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Should have chat spans assert len(chat_spans) >= 1 # Check that messages are captured chat_span = chat_spans[0] - if "gen_ai.request.messages" in chat_span["data"]: - messages_data = chat_span["data"]["gen_ai.request.messages"] + if "gen_ai.request.messages" in chat_span["attributes"]: + messages_data = chat_span["attributes"]["gen_ai.request.messages"] # Should contain message history assert messages_data is not None @pytest.mark.asyncio -async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_events): +async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_items): """ Test that update_invoke_agent_span handles None output gracefully. """ @@ -1639,7 +1673,7 @@ async def test_update_invoke_agent_span_with_none_output(sentry_init, capture_ev @pytest.mark.asyncio -async def test_update_ai_client_span_with_none_response(sentry_init, capture_events): +async def test_update_ai_client_span_with_none_response(sentry_init, capture_items): """ Test that update_ai_client_span handles None response gracefully. 
""" @@ -1666,7 +1700,7 @@ async def test_update_ai_client_span_with_none_response(sentry_init, capture_eve @pytest.mark.asyncio -async def test_agent_without_name(sentry_init, capture_events): +async def test_agent_without_name(sentry_init, capture_items): """ Test that agent without a name is handled correctly. """ @@ -1678,20 +1712,18 @@ async def test_agent_without_name(sentry_init, capture_events): traces_sample_rate=1.0, ) - events = capture_events() + items = capture_items("transaction", "span") await agent.run("Test input") - (transaction,) = events - # Should still create transaction, just with default name - assert transaction["type"] == "transaction" + (transaction,) = (item.payload for item in items if item.type == "transaction") # Transaction name should be "invoke_agent agent" or similar default assert "invoke_agent" in transaction["transaction"] @pytest.mark.asyncio -async def test_model_response_without_parts(sentry_init, capture_events): +async def test_model_response_without_parts(sentry_init, capture_items): """ Test handling of model response without parts attribute. """ @@ -1723,7 +1755,7 @@ async def test_model_response_without_parts(sentry_init, capture_events): @pytest.mark.asyncio -async def test_input_messages_error_handling(sentry_init, capture_events): +async def test_input_messages_error_handling(sentry_init, capture_items): """ Test that _set_input_messages handles errors gracefully. """ @@ -1751,7 +1783,7 @@ async def test_input_messages_error_handling(sentry_init, capture_events): @pytest.mark.asyncio -async def test_available_tools_error_handling(sentry_init, capture_events): +async def test_available_tools_error_handling(sentry_init, capture_items): """ Test that _set_available_tools handles errors gracefully. 
""" @@ -1781,7 +1813,7 @@ async def test_available_tools_error_handling(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_usage_data_with_none_usage(sentry_init, capture_events): +async def test_set_usage_data_with_none_usage(sentry_init, capture_items): """ Test that _set_usage_data handles None usage gracefully. """ @@ -1806,7 +1838,7 @@ async def test_set_usage_data_with_none_usage(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_usage_data_with_partial_fields(sentry_init, capture_events): +async def test_set_usage_data_with_partial_fields(sentry_init, capture_items): """ Test that _set_usage_data handles usage with only some fields. """ @@ -1838,7 +1870,7 @@ async def test_set_usage_data_with_partial_fields(sentry_init, capture_events): @pytest.mark.asyncio -async def test_message_parts_with_tool_return(sentry_init, capture_events): +async def test_message_parts_with_tool_return(sentry_init, capture_items): """ Test that ToolReturnPart messages are handled correctly. """ @@ -1860,22 +1892,23 @@ def test_tool(x: int) -> int: send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") # Run with history containing tool return await agent.run("Use test_tool with 5") - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - chat_spans = [s for s in spans if s["op"] == "gen_ai.chat"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] # Should have chat spans assert len(chat_spans) >= 1 @pytest.mark.asyncio -async def test_message_parts_with_list_content(sentry_init, capture_events): +async def test_message_parts_with_list_content(sentry_init, capture_items): """ Test that message parts with list content are handled correctly. 
""" @@ -1910,7 +1943,7 @@ async def test_message_parts_with_list_content(sentry_init, capture_events): @pytest.mark.asyncio -async def test_output_data_with_text_and_tool_calls(sentry_init, capture_events): +async def test_output_data_with_text_and_tool_calls(sentry_init, capture_items): """ Test that _set_output_data handles both text and tool calls in response. """ @@ -1949,7 +1982,7 @@ async def test_output_data_with_text_and_tool_calls(sentry_init, capture_events) @pytest.mark.asyncio -async def test_output_data_error_handling(sentry_init, capture_events): +async def test_output_data_error_handling(sentry_init, capture_items): """ Test that _set_output_data handles errors in formatting gracefully. """ @@ -1981,7 +2014,7 @@ async def test_output_data_error_handling(sentry_init, capture_events): @pytest.mark.asyncio -async def test_message_with_system_prompt_part(sentry_init, capture_events): +async def test_message_with_system_prompt_part(sentry_init, capture_items): """ Test that SystemPromptPart is handled with correct role. """ @@ -2017,7 +2050,7 @@ async def test_message_with_system_prompt_part(sentry_init, capture_events): @pytest.mark.asyncio -async def test_message_with_instructions(sentry_init, capture_events): +async def test_message_with_instructions(sentry_init, capture_items): """ Test that messages with instructions field are handled correctly. """ @@ -2052,7 +2085,7 @@ async def test_message_with_instructions(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_input_messages_without_prompts(sentry_init, capture_events): +async def test_set_input_messages_without_prompts(sentry_init, capture_items): """ Test that _set_input_messages respects _should_send_prompts(). 
""" @@ -2078,7 +2111,7 @@ async def test_set_input_messages_without_prompts(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_output_data_without_prompts(sentry_init, capture_events): +async def test_set_output_data_without_prompts(sentry_init, capture_items): """ Test that _set_output_data respects _should_send_prompts(). """ @@ -2107,7 +2140,7 @@ async def test_set_output_data_without_prompts(sentry_init, capture_events): @pytest.mark.asyncio -async def test_get_model_name_with_exception_in_callable(sentry_init, capture_events): +async def test_get_model_name_with_exception_in_callable(sentry_init, capture_items): """ Test that _get_model_name handles exceptions in name() callable. """ @@ -2131,7 +2164,7 @@ async def test_get_model_name_with_exception_in_callable(sentry_init, capture_ev @pytest.mark.asyncio -async def test_get_model_name_with_string_model(sentry_init, capture_events): +async def test_get_model_name_with_string_model(sentry_init, capture_items): """ Test that _get_model_name handles string models. """ @@ -2150,7 +2183,7 @@ async def test_get_model_name_with_string_model(sentry_init, capture_events): @pytest.mark.asyncio -async def test_get_model_name_with_none(sentry_init, capture_events): +async def test_get_model_name_with_none(sentry_init, capture_items): """ Test that _get_model_name handles None model. """ @@ -2169,7 +2202,7 @@ async def test_get_model_name_with_none(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_model_data_with_system(sentry_init, capture_events): +async def test_set_model_data_with_system(sentry_init, capture_items): """ Test that _set_model_data captures system from model. 
""" @@ -2200,7 +2233,7 @@ async def test_set_model_data_with_system(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_model_data_from_agent_scope(sentry_init, capture_events): +async def test_set_model_data_from_agent_scope(sentry_init, capture_items): """ Test that _set_model_data retrieves model from agent in scope when not passed. """ @@ -2234,7 +2267,7 @@ async def test_set_model_data_from_agent_scope(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_model_data_with_none_settings_values(sentry_init, capture_events): +async def test_set_model_data_with_none_settings_values(sentry_init, capture_items): """ Test that _set_model_data skips None values in settings. """ @@ -2266,7 +2299,7 @@ async def test_set_model_data_with_none_settings_values(sentry_init, capture_eve @pytest.mark.asyncio -async def test_should_send_prompts_without_pii(sentry_init, capture_events): +async def test_should_send_prompts_without_pii(sentry_init, capture_items): """ Test that _should_send_prompts returns False when PII disabled. """ @@ -2284,7 +2317,7 @@ async def test_should_send_prompts_without_pii(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_agent_data_without_agent(sentry_init, capture_events): +async def test_set_agent_data_without_agent(sentry_init, capture_items): """ Test that _set_agent_data handles None agent gracefully. """ @@ -2309,7 +2342,7 @@ async def test_set_agent_data_without_agent(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_agent_data_from_scope(sentry_init, capture_events): +async def test_set_agent_data_from_scope(sentry_init, capture_items): """ Test that _set_agent_data retrieves agent from scope when not passed. 
""" @@ -2341,7 +2374,7 @@ async def test_set_agent_data_from_scope(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_agent_data_without_name(sentry_init, capture_events): +async def test_set_agent_data_without_name(sentry_init, capture_items): """ Test that _set_agent_data handles agent without name attribute. """ @@ -2371,7 +2404,7 @@ async def test_set_agent_data_without_name(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_available_tools_without_toolset(sentry_init, capture_events): +async def test_set_available_tools_without_toolset(sentry_init, capture_items): """ Test that _set_available_tools handles agent without toolset. """ @@ -2401,7 +2434,7 @@ async def test_set_available_tools_without_toolset(sentry_init, capture_events): @pytest.mark.asyncio -async def test_set_available_tools_with_schema(sentry_init, capture_events): +async def test_set_available_tools_with_schema(sentry_init, capture_items): """ Test that _set_available_tools extracts tool schema correctly. """ @@ -2437,7 +2470,7 @@ async def test_set_available_tools_with_schema(sentry_init, capture_events): @pytest.mark.asyncio -async def test_execute_tool_span_creation(sentry_init, capture_events): +async def test_execute_tool_span_creation(sentry_init, capture_items): """ Test direct creation of execute_tool span. """ @@ -2464,7 +2497,7 @@ async def test_execute_tool_span_creation(sentry_init, capture_events): @pytest.mark.asyncio -async def test_execute_tool_span_with_mcp_type(sentry_init, capture_events): +async def test_execute_tool_span_with_mcp_type(sentry_init, capture_items): """ Test execute_tool span with MCP tool type. 
""" @@ -2490,7 +2523,7 @@ async def test_execute_tool_span_with_mcp_type(sentry_init, capture_events): @pytest.mark.asyncio -async def test_execute_tool_span_without_prompts(sentry_init, capture_events): +async def test_execute_tool_span_without_prompts(sentry_init, capture_items): """ Test that execute_tool span respects _should_send_prompts(). """ @@ -2517,7 +2550,7 @@ async def test_execute_tool_span_without_prompts(sentry_init, capture_events): @pytest.mark.asyncio -async def test_execute_tool_span_with_none_args(sentry_init, capture_events): +async def test_execute_tool_span_with_none_args(sentry_init, capture_items): """ Test execute_tool span with None args. """ @@ -2540,7 +2573,7 @@ async def test_execute_tool_span_with_none_args(sentry_init, capture_events): @pytest.mark.asyncio -async def test_update_execute_tool_span_with_none_span(sentry_init, capture_events): +async def test_update_execute_tool_span_with_none_span(sentry_init, capture_items): """ Test that update_execute_tool_span handles None span gracefully. """ @@ -2561,7 +2594,7 @@ async def test_update_execute_tool_span_with_none_span(sentry_init, capture_even @pytest.mark.asyncio -async def test_update_execute_tool_span_with_none_result(sentry_init, capture_events): +async def test_update_execute_tool_span_with_none_result(sentry_init, capture_items): """ Test that update_execute_tool_span handles None result gracefully. """ @@ -2588,7 +2621,7 @@ async def test_update_execute_tool_span_with_none_result(sentry_init, capture_ev @pytest.mark.asyncio -async def test_tool_execution_without_span_context(sentry_init, capture_events): +async def test_tool_execution_without_span_context(sentry_init, capture_items): """ Test that tool execution patch handles case when no span context exists. This tests the code path where current_span is None in _patch_tool_execution. 
@@ -2617,7 +2650,7 @@ async def test_tool_execution_without_span_context(sentry_init, capture_events): @pytest.mark.asyncio -async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_events): +async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_items): """ Test that invoke_agent_span skips callable instructions correctly. """ @@ -2650,7 +2683,7 @@ async def test_invoke_agent_span_with_callable_instruction(sentry_init, capture_ @pytest.mark.asyncio -async def test_invoke_agent_span_with_string_instructions(sentry_init, capture_events): +async def test_invoke_agent_span_with_string_instructions(sentry_init, capture_items): """ Test that invoke_agent_span handles string instructions (not list). """ @@ -2680,7 +2713,7 @@ async def test_invoke_agent_span_with_string_instructions(sentry_init, capture_e @pytest.mark.asyncio -async def test_ai_client_span_with_streaming_flag(sentry_init, capture_events): +async def test_ai_client_span_with_streaming_flag(sentry_init, capture_items): """ Test that ai_client_span reads streaming flag from scope. """ @@ -2706,7 +2739,7 @@ async def test_ai_client_span_with_streaming_flag(sentry_init, capture_events): @pytest.mark.asyncio -async def test_ai_client_span_gets_agent_from_scope(sentry_init, capture_events): +async def test_ai_client_span_gets_agent_from_scope(sentry_init, capture_items): """ Test that ai_client_span gets agent from scope when not passed. 
""" @@ -2759,7 +2792,7 @@ def _find_binary_content(messages_data, expected_modality, expected_mime_type): @pytest.mark.asyncio -async def test_binary_content_encoding_image(sentry_init, capture_events): +async def test_binary_content_encoding_image(sentry_init, capture_items): """Test that BinaryContent with image data is properly encoded in messages.""" sentry_init( integrations=[PydanticAIIntegration()], @@ -2767,7 +2800,7 @@ async def test_binary_content_encoding_image(sentry_init, capture_events): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") with sentry_sdk.start_transaction(op="test", name="test"): span = sentry_sdk.start_span(op="test_span") @@ -2782,14 +2815,14 @@ async def test_binary_content_encoding_image(sentry_init, capture_events): _set_input_messages(span, [mock_msg]) span.finish() - (event,) = events + (event,) = (item.payload for item in items if item.type == "transaction") span_data = event["spans"][0]["data"] messages_data = _get_messages_from_span(span_data) assert _find_binary_content(messages_data, "image", "image/png") @pytest.mark.asyncio -async def test_binary_content_encoding_mixed_content(sentry_init, capture_events): +async def test_binary_content_encoding_mixed_content(sentry_init, capture_items): """Test that BinaryContent mixed with text content is properly handled.""" sentry_init( integrations=[PydanticAIIntegration()], @@ -2797,7 +2830,7 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_events send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") with sentry_sdk.start_transaction(op="test", name="test"): span = sentry_sdk.start_span(op="test_span") @@ -2814,7 +2847,7 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_events _set_input_messages(span, [mock_msg]) span.finish() - (event,) = events + (event,) = (item.payload for item in items if item.type == "transaction") span_data = 
event["spans"][0]["data"] messages_data = _get_messages_from_span(span_data) @@ -2830,7 +2863,7 @@ async def test_binary_content_encoding_mixed_content(sentry_init, capture_events @pytest.mark.asyncio -async def test_binary_content_in_agent_run(sentry_init, capture_events): +async def test_binary_content_in_agent_run(sentry_init, capture_items): """Test that BinaryContent in actual agent run is properly captured in spans.""" agent = Agent("test", name="test_binary_agent") @@ -2840,28 +2873,33 @@ async def test_binary_content_in_agent_run(sentry_init, capture_events): send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") binary_content = BinaryContent( data=b"fake_image_data_for_testing", media_type="image/png" ) await agent.run(["Analyze this image:", binary_content]) - (transaction,) = events - chat_spans = [s for s in transaction["spans"] if s["op"] == "gen_ai.chat"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 chat_span = chat_spans[0] - if "gen_ai.request.messages" in chat_span["data"]: - messages_str = str(chat_span["data"]["gen_ai.request.messages"]) + if "gen_ai.request.messages" in chat_span["attributes"]: + messages_str = str(chat_span["attributes"]["gen_ai.request.messages"]) assert any(keyword in messages_str for keyword in ["blob", "image", "base64"]) @pytest.mark.asyncio -async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): +async def test_set_usage_data_with_cache_tokens(sentry_init, capture_items): """Test that cache_read_tokens and cache_write_tokens are tracked.""" - sentry_init(integrations=[PydanticAIIntegration()], traces_sample_rate=1.0) + sentry_init( + integrations=[PydanticAIIntegration()], + traces_sample_rate=1.0, + ) - events = capture_events() + items = capture_items("transaction", "span") with 
sentry_sdk.start_transaction(op="test", name="test"): span = sentry_sdk.start_span(op="test_span") @@ -2874,7 +2912,7 @@ async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): _set_usage_data(span, usage) span.finish() - (event,) = events + (event,) = (item.payload for item in items if item.type == "transaction") (span_data,) = event["spans"] assert span_data["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED] == 80 assert span_data["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE] == 20 @@ -2922,7 +2960,7 @@ async def test_set_usage_data_with_cache_tokens(sentry_init, capture_events): ], ) def test_image_url_base64_content_in_span( - sentry_init, capture_events, url, image_url_kwargs, expected_content + sentry_init, capture_items, url, image_url_kwargs, expected_content ): from sentry_sdk.integrations.pydantic_ai.spans.ai_client import ai_client_span @@ -2932,7 +2970,7 @@ def test_image_url_base64_content_in_span( send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") with sentry_sdk.start_transaction(op="test", name="test"): image_url = ImageUrl(url=url, **image_url_kwargs) @@ -2944,10 +2982,12 @@ def test_image_url_base64_content_in_span( span = ai_client_span([mock_msg], None, None, None) span.finish() - (event,) = events - chat_spans = [s for s in event["spans"] if s["op"] == "gen_ai.chat"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] assert len(chat_spans) >= 1 - messages_data = _get_messages_from_span(chat_spans[0]["data"]) + messages_data = _get_messages_from_span(chat_spans[0]["attributes"]) found_image = False for msg in messages_data: @@ -2992,7 +3032,7 @@ def test_image_url_base64_content_in_span( ], ) async def test_invoke_agent_image_url( - sentry_init, capture_events, url, image_url_kwargs, expected_content + sentry_init, capture_items, url, image_url_kwargs, 
expected_content ): sentry_init( integrations=[PydanticAIIntegration()], @@ -3002,17 +3042,18 @@ async def test_invoke_agent_image_url( agent = Agent("test", name="test_image_url_agent") - events = capture_events() + items = capture_items("transaction", "span") image_url = ImageUrl(url=url, **image_url_kwargs) await agent.run([image_url, "Describe this image"]) - (transaction,) = events - found_image = False - chat_spans = [s for s in transaction["spans"] if s["op"] == "gen_ai.chat"] + spans = [item.payload for item in items if item.type == "span"] + chat_spans = [ + s for s in spans if s["attributes"].get("sentry.op", "") == "gen_ai.chat" + ] for chat_span in chat_spans: - messages_data = _get_messages_from_span(chat_span["data"]) + messages_data = _get_messages_from_span(chat_span["attributes"]) for msg in messages_data: if "content" not in msg: continue @@ -3025,7 +3066,7 @@ async def test_invoke_agent_image_url( @pytest.mark.asyncio -async def test_tool_description_in_execute_tool_span(sentry_init, capture_events): +async def test_tool_description_in_execute_tool_span(sentry_init, capture_items): """ Test that tool description from the tool's docstring is included in execute_tool spans. 
""" @@ -3046,18 +3087,24 @@ def multiply_numbers(a: int, b: int) -> int: send_default_pii=True, ) - events = capture_events() + items = capture_items("transaction", "span") result = await agent.run("What is 5 times 3?") assert result is not None - (transaction,) = events - spans = transaction["spans"] + spans = [item.payload for item in items if item.type == "span"] - tool_spans = [s for s in spans if s["op"] == "gen_ai.execute_tool"] + tool_spans = [ + s + for s in spans + if s["attributes"].get("sentry.op", "") == "gen_ai.execute_tool" + ] assert len(tool_spans) >= 1 tool_span = tool_spans[0] - assert tool_span["data"]["gen_ai.tool.name"] == "multiply_numbers" - assert SPANDATA.GEN_AI_TOOL_DESCRIPTION in tool_span["data"] - assert "Multiply two numbers" in tool_span["data"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] + assert tool_span["attributes"]["gen_ai.tool.name"] == "multiply_numbers" + assert SPANDATA.GEN_AI_TOOL_DESCRIPTION in tool_span["attributes"] + assert ( + "Multiply two numbers" + in tool_span["attributes"][SPANDATA.GEN_AI_TOOL_DESCRIPTION] + ) diff --git a/tests/tracing/test_decorator.py b/tests/tracing/test_decorator.py index 15432f5862..d370b4bbc9 100644 --- a/tests/tracing/test_decorator.py +++ b/tests/tracing/test_decorator.py @@ -121,9 +121,12 @@ async def _some_function_traced(a, b, c): ) -def test_span_templates_ai_dicts(sentry_init, capture_events): - sentry_init(traces_sample_rate=1.0) - events = capture_events() +def test_span_templates_ai_dicts(sentry_init, capture_items): + sentry_init( + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) + items = capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) def my_tool(arg1, arg2): @@ -166,40 +169,54 @@ def my_agent(): with sentry_sdk.start_transaction(name="test-transaction"): my_agent() - (event,) = events - (agent_span, tool_span, chat_span) = event["spans"] + (agent_span, tool_span, chat_span) = ( + item.payload for item in items if item.type == "span" + ) - 
assert agent_span["op"] == "gen_ai.invoke_agent" assert ( - agent_span["description"] + agent_span["name"] == "invoke_agent test_decorator.test_span_templates_ai_dicts..my_agent" ) - assert agent_span["data"] == { + assert agent_span["attributes"] == { "gen_ai.agent.name": "test_decorator.test_span_templates_ai_dicts..my_agent", "gen_ai.operation.name": "invoke_agent", + "sentry.environment": "production", + "sentry.op": "gen_ai.invoke_agent", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert tool_span["op"] == "gen_ai.execute_tool" assert ( - tool_span["description"] + tool_span["name"] == "execute_tool test_decorator.test_span_templates_ai_dicts..my_tool" ) - assert tool_span["data"] == { + assert tool_span["attributes"] == { "gen_ai.tool.name": "test_decorator.test_span_templates_ai_dicts..my_tool", "gen_ai.operation.name": "execute_tool", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, "gen_ai.usage.total_tokens": 30, + "sentry.environment": "production", + "sentry.op": "gen_ai.execute_tool", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert "gen_ai.tool.description" not in tool_span["data"] + assert "gen_ai.tool.description" not in tool_span["attributes"] - assert chat_span["op"] == "gen_ai.chat" - assert chat_span["description"] == "chat my-gpt-4o-mini" - assert chat_span["data"] == { + assert chat_span["name"] == "chat my-gpt-4o-mini" + assert chat_span["attributes"] == { "gen_ai.operation.name": "chat", "gen_ai.request.frequency_penalty": 1.0, "gen_ai.request.max_tokens": 100, @@ -213,14 +230,25 @@ def 
my_agent(): "gen_ai.usage.input_tokens": 11, "gen_ai.usage.output_tokens": 22, "gen_ai.usage.total_tokens": 33, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } -def test_span_templates_ai_objects(sentry_init, capture_events): - sentry_init(traces_sample_rate=1.0) - events = capture_events() +def test_span_templates_ai_objects(sentry_init, capture_items): + sentry_init( + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) + items = capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) def my_tool(arg1, arg2): @@ -267,40 +295,54 @@ def my_agent(): with sentry_sdk.start_transaction(name="test-transaction"): my_agent() - (event,) = events - (agent_span, tool_span, chat_span) = event["spans"] + (agent_span, tool_span, chat_span) = ( + item.payload for item in items if item.type == "span" + ) - assert agent_span["op"] == "gen_ai.invoke_agent" assert ( - agent_span["description"] + agent_span["name"] == "invoke_agent test_decorator.test_span_templates_ai_objects..my_agent" ) - assert agent_span["data"] == { + assert agent_span["attributes"] == { "gen_ai.agent.name": "test_decorator.test_span_templates_ai_objects..my_agent", "gen_ai.operation.name": "invoke_agent", + "sentry.environment": "production", + "sentry.op": "gen_ai.invoke_agent", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert tool_span["op"] == "gen_ai.execute_tool" assert ( - tool_span["description"] + tool_span["name"] == "execute_tool 
test_decorator.test_span_templates_ai_objects..my_tool" ) - assert tool_span["data"] == { + assert tool_span["attributes"] == { "gen_ai.tool.name": "test_decorator.test_span_templates_ai_objects..my_tool", "gen_ai.tool.description": "This is a tool function.", "gen_ai.operation.name": "execute_tool", "gen_ai.usage.input_tokens": 10, "gen_ai.usage.output_tokens": 20, "gen_ai.usage.total_tokens": 30, + "sentry.environment": "production", + "sentry.op": "gen_ai.execute_tool", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } - assert chat_span["op"] == "gen_ai.chat" - assert chat_span["description"] == "chat my-gpt-4o-mini" - assert chat_span["data"] == { + assert chat_span["name"] == "chat my-gpt-4o-mini" + assert chat_span["attributes"] == { "gen_ai.operation.name": "chat", "gen_ai.request.frequency_penalty": 1.0, "gen_ai.request.max_tokens": 100, @@ -314,15 +356,27 @@ def my_agent(): "gen_ai.usage.input_tokens": 11, "gen_ai.usage.output_tokens": 22, "gen_ai.usage.total_tokens": 33, + "sentry.environment": "production", + "sentry.op": "gen_ai.chat", + "sentry.origin": "manual", + "sentry.release": mock.ANY, + "sentry.sdk.name": "sentry.python", + "sentry.sdk.version": mock.ANY, + "sentry.segment.id": mock.ANY, + "sentry.segment.name": "test-transaction", "thread.id": mock.ANY, "thread.name": mock.ANY, } @pytest.mark.parametrize("send_default_pii", [True, False]) -def test_span_templates_ai_pii(sentry_init, capture_events, send_default_pii): - sentry_init(traces_sample_rate=1.0, send_default_pii=send_default_pii) - events = capture_events() +def test_span_templates_ai_pii(sentry_init, capture_items, send_default_pii): + sentry_init( + traces_sample_rate=1.0, + send_default_pii=send_default_pii, + _experiments={"gen_ai_as_v2_spans": True}, + ) + items = 
capture_items("span") @sentry_sdk.trace(template=SPANTEMPLATE.AI_TOOL) def my_tool(arg1, arg2, **kwargs): @@ -352,15 +406,14 @@ def my_agent(*args, **kwargs): with sentry_sdk.start_transaction(name="test-transaction"): my_agent(22, 33, arg1=44, arg2=55) - (event,) = events - (_, tool_span, _) = event["spans"] + (_, tool_span, _) = (item.payload for item in items if item.type == "span") if send_default_pii: assert ( - tool_span["data"]["gen_ai.tool.input"] + tool_span["attributes"]["gen_ai.tool.input"] == "{'args': (1, 2), 'kwargs': {'tool_arg1': '3', 'tool_arg2': '4'}}" ) - assert tool_span["data"]["gen_ai.tool.output"] == "'tool_output'" + assert tool_span["attributes"]["gen_ai.tool.output"] == "'tool_output'" else: - assert "gen_ai.tool.input" not in tool_span["data"] - assert "gen_ai.tool.output" not in tool_span["data"] + assert "gen_ai.tool.input" not in tool_span["attributes"] + assert "gen_ai.tool.output" not in tool_span["attributes"] diff --git a/tests/tracing/test_misc.py b/tests/tracing/test_misc.py index 8895c98dbc..4209a02b4b 100644 --- a/tests/tracing/test_misc.py +++ b/tests/tracing/test_misc.py @@ -647,11 +647,14 @@ def test_conversation_id_propagates_to_span_with_ai_op( assert span_data.get("gen_ai.conversation.id") == "conv-ai-op-test" def test_conversation_id_propagates_to_span_with_gen_ai_op( - self, sentry_init, capture_events + self, sentry_init, capture_items ): """Span with gen_ai.* op should get conversation_id.""" - sentry_init(traces_sample_rate=1.0) - events = capture_events() + sentry_init( + traces_sample_rate=1.0, + _experiments={"gen_ai_as_v2_spans": True}, + ) + items = capture_items("span") scope = sentry_sdk.get_current_scope() scope.set_conversation_id("conv-gen-ai-op-test") @@ -660,8 +663,8 @@ def test_conversation_id_propagates_to_span_with_gen_ai_op( with start_span(op="gen_ai.invoke_agent"): pass - (event,) = events - span_data = event["spans"][0]["data"] + spans = [item.payload for item in items if item.type == "span"] + 
span_data = spans[0]["attributes"] assert span_data.get("gen_ai.conversation.id") == "conv-gen-ai-op-test" def test_conversation_id_not_propagated_to_non_ai_span(