chore: autoformat the test_litellm.py · marianocodes/adk-python@ae7d3a7 · GitHub
[go: up one dir, main page]

Skip to content

Commit ae7d3a7

Browse files
seanzhougooglecopybara-github
authored and committed
chore: autoformat the test_litellm.py
PiperOrigin-RevId: 764937761
1 parent 2b41824 commit ae7d3a7

File tree

1 file changed

+66
-70
lines changed

1 file changed

+66
-70
lines changed

tests/unittests/models/test_litellm.py

Lines changed: 66 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
# limitations under the License.
1414

1515

16+
import json
1617
from unittest.mock import AsyncMock
1718
from unittest.mock import Mock
1819

@@ -38,7 +39,6 @@
3839
from litellm.types.utils import ModelResponse
3940
from litellm.types.utils import StreamingChoices
4041
import pytest
41-
import json
4242

4343
LLM_REQUEST_WITH_FUNCTION_DECLARATION = LlmRequest(
4444
contents=[
@@ -1190,74 +1190,70 @@ async def test_generate_content_async_stream_with_usage_metadata(
11901190
async def test_generate_content_async_multiple_function_calls(
11911191
mock_completion, lite_llm_instance
11921192
):
1193-
"""Test handling of multiple function calls with different indices in streaming mode.
1194-
1195-
This test verifies that:
1196-
1. Multiple function calls with different indices are handled correctly
1197-
2. Arguments and names are properly accumulated for each function call
1198-
3. The final response contains all function calls with correct indices
1199-
"""
1200-
mock_completion.return_value = MULTIPLE_FUNCTION_CALLS_STREAM
1201-
1202-
llm_request = LlmRequest(
1203-
contents=[
1204-
types.Content(
1205-
role="user",
1206-
parts=[types.Part.from_text(text="Test multiple function calls")],
1207-
)
1208-
],
1209-
config=types.GenerateContentConfig(
1210-
tools=[
1211-
types.Tool(
1212-
function_declarations=[
1213-
types.FunctionDeclaration(
1214-
name="function_1",
1215-
description="First test function",
1216-
parameters=types.Schema(
1217-
type=types.Type.OBJECT,
1218-
properties={
1219-
"arg": types.Schema(type=types.Type.STRING),
1220-
},
1221-
),
1222-
),
1223-
types.FunctionDeclaration(
1224-
name="function_2",
1225-
description="Second test function",
1226-
parameters=types.Schema(
1227-
type=types.Type.OBJECT,
1228-
properties={
1229-
"arg": types.Schema(type=types.Type.STRING),
1230-
},
1231-
),
1232-
),
1233-
]
1234-
)
1235-
],
1236-
),
1237-
)
1193+
"""Test handling of multiple function calls with different indices in streaming mode.
12381194
1239-
responses = []
1240-
async for response in lite_llm_instance.generate_content_async(
1241-
llm_request, stream=True
1242-
):
1243-
responses.append(response)
1244-
1245-
# Verify we got the final response with both function calls
1246-
assert len(responses) > 0
1247-
final_response = responses[-1]
1248-
assert final_response.content.role == "model"
1249-
assert len(final_response.content.parts) == 2
1250-
1251-
# Verify first function call
1252-
assert final_response.content.parts[0].function_call.name == "function_1"
1253-
assert final_response.content.parts[0].function_call.id == "call_1"
1254-
assert final_response.content.parts[0].function_call.args == {
1255-
"arg": "value1"
1256-
}
1195+
This test verifies that:
1196+
1. Multiple function calls with different indices are handled correctly
1197+
2. Arguments and names are properly accumulated for each function call
1198+
3. The final response contains all function calls with correct indices
1199+
"""
1200+
mock_completion.return_value = MULTIPLE_FUNCTION_CALLS_STREAM
12571201

1258-
# Verify second function call
1259-
assert final_response.content.parts[1].function_call.name == "function_2"
1260-
assert final_response.content.parts[1].function_call.id == "call_2"
1261-
assert final_response.content.parts[1].function_call.args == {
1262-
"arg": "value2"
1263-
}
1202+
llm_request = LlmRequest(
1203+
contents=[
1204+
types.Content(
1205+
role="user",
1206+
parts=[types.Part.from_text(text="Test multiple function calls")],
1207+
)
1208+
],
1209+
config=types.GenerateContentConfig(
1210+
tools=[
1211+
types.Tool(
1212+
function_declarations=[
1213+
types.FunctionDeclaration(
1214+
name="function_1",
1215+
description="First test function",
1216+
parameters=types.Schema(
1217+
type=types.Type.OBJECT,
1218+
properties={
1219+
"arg": types.Schema(type=types.Type.STRING),
1220+
},
1221+
),
1222+
),
1223+
types.FunctionDeclaration(
1224+
name="function_2",
1225+
description="Second test function",
1226+
parameters=types.Schema(
1227+
type=types.Type.OBJECT,
1228+
properties={
1229+
"arg": types.Schema(type=types.Type.STRING),
1230+
},
1231+
),
1232+
),
1233+
]
1234+
)
1235+
],
1236+
),
1237+
)
1238+
1239+
responses = []
1240+
async for response in lite_llm_instance.generate_content_async(
1241+
llm_request, stream=True
1242+
):
1243+
responses.append(response)
1244+
1245+
# Verify we got the final response with both function calls
1246+
assert len(responses) > 0
1247+
final_response = responses[-1]
1248+
assert final_response.content.role == "model"
1249+
assert len(final_response.content.parts) == 2
1250+
1251+
# Verify first function call
1252+
assert final_response.content.parts[0].function_call.name == "function_1"
1253+
assert final_response.content.parts[0].function_call.id == "call_1"
1254+
assert final_response.content.parts[0].function_call.args == {"arg": "value1"}
1255+
1256+
# Verify second function call
1257+
assert final_response.content.parts[1].function_call.name == "function_2"
1258+
assert final_response.content.parts[1].function_call.id == "call_2"
1259+
assert final_response.content.parts[1].function_call.args == {"arg": "value2"}

0 commit comments

Comments (0)
0