feat: Add integration tests for litellm with and without turn on add_function_to_prompt · d33bs/adk-python@8e28587
Commit 8e28587

google-genai-bot authored and copybara-github committed
feat: Add integration tests for litellm with and without turn on add_function_to_prompt
Add experiments for google#1273

PiperOrigin-RevId: 771145715
1 parent cb55970 commit 8e28587
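
Context: the two files below are nearly identical. The second one additionally sets litellm.add_function_to_prompt = True, a LiteLLM flag that (per LiteLLM's docs) injects the declared function schemas into the prompt text, so that models without native function calling can still be prompted to emit tool calls.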

File tree

2 files changed

+169
-0
lines changed

2 files changed

+169
-0
lines changed
Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from google.adk.models import LlmRequest
from google.adk.models import LlmResponse
from google.adk.models.lite_llm import LiteLlm
from google.genai import types
from google.genai.types import Content
from google.genai.types import Part
import pytest

_TEST_MODEL_NAME = "vertex_ai/meta/llama-4-maverick-17b-128e-instruct-maas"


_SYSTEM_PROMPT = """You are a helpful assistant."""


@pytest.fixture
def oss_llm():
  return LiteLlm(model=_TEST_MODEL_NAME)


@pytest.fixture
def llm_request():
  return LlmRequest(
      model=_TEST_MODEL_NAME,
      contents=[Content(role="user", parts=[Part.from_text(text="hello")])],
      config=types.GenerateContentConfig(
          temperature=0.1,
          response_modalities=[types.Modality.TEXT],
          system_instruction=_SYSTEM_PROMPT,
      ),
  )


@pytest.mark.asyncio
async def test_generate_content_async(oss_llm, llm_request):
  async for response in oss_llm.generate_content_async(llm_request):
    assert isinstance(response, LlmResponse)
    assert response.content.parts[0].text


# Note: this test passes stream=False because streaming is not properly
# supported by the current test model.
@pytest.mark.asyncio
async def test_generate_content_async_stream(oss_llm, llm_request):
  responses = [
      resp
      async for resp in oss_llm.generate_content_async(
          llm_request, stream=False
      )
  ]
  part = responses[0].content.parts[0]
  assert len(part.text) > 0
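
For contrast, here is a minimal sketch (not part of the commit) of consuming the streaming path against a model that does support it. It assumes ADK's streaming mode yields incremental LlmResponse objects followed by a final aggregated one, and that the LlmResponse.partial flag marks the increments; both assumptions should be verified against the installed ADK version.

async def collect_streamed_text(llm: LiteLlm, request: LlmRequest) -> str:
  """Joins incremental text chunks from a streaming generation (sketch)."""
  chunks = []
  async for resp in llm.generate_content_async(request, stream=True):
    # Keep only incremental chunks; skip the final aggregated response so
    # the text is not duplicated in the joined result.
    if resp.partial and resp.content and resp.content.parts:
      text = resp.content.parts[0].text
      if text:
        chunks.append(text)
  return "".join(chunks)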
Lines changed: 104 additions & 0 deletions
@@ -0,0 +1,104 @@
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from google.adk.models import LlmRequest
from google.adk.models import LlmResponse
from google.adk.models.lite_llm import LiteLlm
from google.genai import types
from google.genai.types import Content
from google.genai.types import Part
import litellm
import pytest

# Inject the declared function schemas into the prompt text, so models
# without native function-calling support can still emit tool calls.
litellm.add_function_to_prompt = True

_TEST_MODEL_NAME = "vertex_ai/meta/llama-4-maverick-17b-128e-instruct-maas"


_SYSTEM_PROMPT = """
You are a helpful assistant, and you may optionally call tools.
If you call a tool, the call should be formatted as JSON, and the tool
arguments should be parsed from the user's input.
"""


_FUNCTIONS = [{
    "name": "get_weather",
    "description": "Get the weather in a given location",
    "parameters": {
        "type": "object",
        "properties": {
            "city": {
                "type": "string",
                "description": "The city, e.g. San Francisco",
            },
        },
        "required": ["city"],
    },
}]


def get_weather(city: str) -> str:
  """Simulates a web search. Use it to get weather information.

  Args:
    city: A string containing the location to get weather information for.

  Returns:
    A string with the simulated weather information for the queried city.
  """
  if "sf" in city.lower() or "san francisco" in city.lower():
    return "It's 70 degrees and foggy."
  return "It's 80 degrees and sunny."


@pytest.fixture
def oss_llm_with_function():
  return LiteLlm(model=_TEST_MODEL_NAME, functions=_FUNCTIONS)


@pytest.fixture
def llm_request():
  return LlmRequest(
      model=_TEST_MODEL_NAME,
      contents=[
          Content(
              role="user",
              parts=[
                  Part.from_text(text="What is the weather in San Francisco?")
              ],
          )
      ],
      config=types.GenerateContentConfig(
          temperature=0.1,
          response_modalities=[types.Modality.TEXT],
          system_instruction=_SYSTEM_PROMPT,
      ),
  )


# Note: this test passes stream=False because streaming is not properly
# supported by the current test model.
@pytest.mark.asyncio
async def test_generate_content_async_with_function(
    oss_llm_with_function, llm_request
):
  responses = [
      resp
      async for resp in oss_llm_with_function.generate_content_async(
          llm_request, stream=False
      )
  ]
  function_call = responses[0].content.parts[0].function_call
  assert function_call.name == "get_weather"
  assert function_call.args["city"] == "San Francisco"
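
The test asserts only that a function_call comes back; it never executes get_weather. A minimal sketch (not part of the commit) of closing that loop with a hand-rolled dispatch table; _DISPATCH and execute_function_call are illustrative names, not ADK or LiteLLM API:

# Hypothetical dispatch table mapping declared function names to the local
# Python implementations defined above.
_DISPATCH = {"get_weather": get_weather}


def execute_function_call(function_call) -> str:
  """Routes a model-returned FunctionCall to its local handler (sketch)."""
  handler = _DISPATCH[function_call.name]
  # FunctionCall.args is a dict of parsed arguments, e.g. {"city": "San Francisco"}.
  return handler(**function_call.args)

# Usage: execute_function_call(responses[0].content.parts[0].function_call)
# would return "It's 70 degrees and foggy." for the San Francisco request.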
