E57F fix: prompt token may be None in streaming mode · google/adk-python@32ee07d · GitHub
[go: up one dir, main page]

Skip to content

Commit 32ee07d

Browse files
wsa-2002 authored and copybara-github committed
fix: prompt token may be None in streaming mode
Merge #3462 **Please ensure you have read the [contribution guide](https://github.com/google/adk-python/blob/main/CONTRIBUTING.md) before creating a pull request.** ### Link to Issue or Description of Change **1. Link to an existing issue (if applicable):** - Closes: #_issue_number_ - Related: #_issue_number_ **2. Or, if no issue exists, describe the change:** **Problem:** When using adk in streaming mode, `usage_metadata.prompt_token_count` may be `None` which will emit log ```Invalid type NoneType for attribute 'gen_ai.usage.input_tokens' value. Expected one of ['bool', 'str', 'bytes', 'int', 'float'] or a sequence of those types``` **Solution:** Skip setting span attribute if prompt token count is None **Unit Tests:** - [x] All unit tests pass locally. _Please include a summary of passed `pytest` results._ ### Checklist - [x] I have read the [CONTRIBUTING.md](https://github.com/google/adk-python/blob/main/CONTRIBUTING.md) document. - [x] I have performed a self-review of my own code. - [x] I have commented my code, particularly in hard-to-understand areas. - [x] I have added tests that prove my fix is effective or that my feature works. - [x] New and existing unit tests pass locally with my changes. - [x] I have manually tested my changes end-to-end. - [x] Any dependent changes have been merged and published in downstream modules. COPYBARA_INTEGRATE_REVIEW=#3462 from wsa-2002:prompt-token-count-may-be-none-in-streaming-mode 9466686 PiperOrigin-RevId: 867693355
1 parent 43c437e commit 32ee07d

File tree

2 files changed

+49
-4
lines changed

2 files changed

+49
-4
lines changed

src/google/adk/telemetry/tracing.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -327,10 +327,11 @@ def trace_call_llm(
327327
span.set_attribute('gcp.vertex.agent.llm_response', '{}')
328328

329329
if llm_response.usage_metadata is not None:
330-
span.set_attribute(
331-
'gen_ai.usage.input_tokens',
332-
llm_response.usage_metadata.prompt_token_count,
333-
)
330+
if llm_response.usage_metadata.prompt_token_count is not None:
331+
span.set_attribute(
332+
'gen_ai.usage.input_tokens',
333+
llm_response.usage_metadata.prompt_token_count,
334+
)
334335
if llm_response.usage_metadata.candidates_token_count is not None:
335336
span.set_attribute(
336337
'gen_ai.usage.output_tokens',

tests/unittests/telemetry/test_spans.py

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,50 @@ async def test_trace_call_llm(monkeypatch, mock_span_fixture):
167167
)
168168

169169

170+
@pytest.mark.asyncio
171+
async def test_trace_call_llm_with_no_usage_metadata(
172+
monkeypatch, mock_span_fixture
173+
):
174+
"""Test trace_call_llm handles usage metadata with None token counts."""
175+
monkeypatch.setattr(
176+
'opentelemetry.trace.get_current_span', lambda: mock_span_fixture
177+
)
178+
179+
agent = LlmAgent(name='test_agent')
180+
invocation_context = await _create_invocation_context(agent)
181+
llm_request = LlmRequest(
182+
model='gemini-pro',
183+
contents=[
184+
types.Content(
185+
role='user',
186+
parts=[types.Part(text='Hello, how are you?')],
187+
),
188+
],
189+
config=types.GenerateContentConfig(
190+
top_p=0.95,
191+
max_output_tokens=1024,
192+
),
193+
)
194+
llm_response = LlmResponse(
195+
turn_complete=True,
196+
finish_reason=types.FinishReason.STOP,
197+
usage_metadata=types.GenerateContentResponseUsageMetadata(),
198+
)
199+
trace_call_llm(invocation_context, 'test_event_id', llm_request, llm_response)
200+
201+
expected_calls = [
202+
mock.call('gen_ai.system', 'gcp.vertex.agent'),
203+
mock.call('gen_ai.request.top_p', 0.95),
204+
mock.call('gen_ai.request.max_tokens', 1024),
205+
mock.call('gcp.vertex.agent.llm_response', mock.ANY),
206+
mock.call('gen_ai.response.finish_reasons', ['stop']),
207+
]
208+
assert mock_span_fixture.set_attribute.call_count == 10
209+
mock_span_fixture.set_attribute.assert_has_calls(
210+
expected_calls, any_order=True
211+
)
212+
213+
170214
@pytest.mark.asyncio
171215
async def test_trace_call_llm_with_binary_content(
172216
monkeypatch, mock_span_fixture

0 commit comments

Comments (0)