10000 Merge branch 'langchain-leo' into langchain-template · allient/create-fastapi-project@b948b5c · GitHub
[go: up one dir, main page]

Skip to content

Commit b948b5c

Browse files
committed
Merge branch 'langchain-leo' into langchain-template
2 parents 97a40ca + 174e32b commit b948b5c

File tree

10 files changed

+449
-14
lines changed

10 files changed

+449
-14
lines changed
Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,3 @@
11
PROJECT_NAME=
2-
OPENAI_API_KEY=
2+
OPENAI_API_KEY=
3+
UNSPLASH_API_KEY=

create_fastapi_project/templates/langchain_basic/app/app/api/v1/endpoints/chat.py

Lines changed: 64 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,11 @@
33
IUserMessage,
44
)
55
from app.utils.adaptive_cards.cards import create_adaptive_card
6-
from app.utils.callback import CustomAsyncCallbackHandler
6+
from app.utils.callback import (
7+
CustomAsyncCallbackHandler,
8+
CustomFinalStreamingStdOutCallbackHandler,
9+
)
10+
from app.utils.tools import GeneralKnowledgeTool, ImageSearchTool, PokemonSearchTool
711
from fastapi import APIRouter, WebSocket
812
from app.utils.uuid6 import uuid7
913
from app.core.config import settings
@@ -18,18 +22,14 @@
1822
)
1923
from langchain.memory import ConversationBufferMemory
2024
from langchain.chains import LLMChain
21-
25+
from langchain.agents import ZeroShotAgent, AgentExecutor
26+
from app.utils.prompt_zero import zero_agent_prompt
2227

2328
router = APIRouter()
2429

2530
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
2631

2732

28-
@router.get("/chat")
29-
async def home():
30-
return "Hello World"
31-
32-
3333
@router.websocket("")
3434
async def websocket_endpoint(websocket: WebSocket):
3535
await websocket.accept()
@@ -85,3 +85,60 @@ async def websocket_endpoint(websocket: WebSocket):
8585
print("#" * 100)
86< 57A0 /code>86
print(response)
8787
print("#" * 100)
88+
89+
90+
@router.websocket("/1")
async def websocket_endpoint(websocket: WebSocket):
    """Agent-based chat endpoint.

    Accepts the socket, then for each incoming JSON message:
    1. echoes the user's message back as an adaptive card ("start" frame),
    2. runs the zero-shot agent and streams the final answer through
       CustomFinalStreamingStdOutCallbackHandler.

    Conversation state is kept in the module-level `memory`.
    """
    await websocket.accept()

    # The tools, LLM, agent and executor are loop-invariant (per-message
    # state lives in `memory` and in the per-message callback handler), so
    # build them once per connection instead of once per received message.
    tools = [
        GeneralKnowledgeTool(),
        PokemonSearchTool(),
        ImageSearchTool(),
    ]

    llm = ChatOpenAI(
        streaming=True,
        temperature=0,
    )

    agent = ZeroShotAgent.from_llm_and_tools(
        llm=llm,
        tools=tools,
        prefix=zero_agent_prompt.prefix,
        suffix=zero_agent_prompt.suffix,
        format_instructions=zero_agent_prompt.format_instructions,
        input_variables=zero_agent_prompt.input_variables,
    )
    # TODO: We should use this
    # * max_execution_time=1,
    # early_stopping_method="generate",
    agent_executor = AgentExecutor.from_agent_and_tools(
        agent=agent,
        tools=tools,
        verbose=True,
        handle_parsing_errors=True,
        memory=memory,
    )

    while True:
        data = await websocket.receive_json()
        user_message = data["message"]
        user_message_card = create_adaptive_card(user_message)

        # Echo the user's message back so the client can render it.
        resp = IChatResponse(
            sender="you",
            message=user_message_card.to_dict(),
            type="start",
            message_id=str(uuid7()),
            id=str(uuid7()),
        )
        await websocket.send_json(resp.dict())

        # Fresh message id per answer so streamed chunks group correctly
        # on the client side.
        message_id: str = str(uuid7())
        custom_handler = CustomFinalStreamingStdOutCallbackHandler(
            websocket, message_id=message_id
        )

        await agent_executor.arun(input=user_message, callbacks=[custom_handler])

create_fastapi_project/templates/langchain_basic/app/app/core/config.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ class Settings(BaseSettings):
1515
API_VERSION: str = "v1"
1616
API_V1_STR: str = f"/api/{API_VERSION}"
1717
OPENAI_API_KEY: str
18+
UNSPLASH_API_KEY: str
1819

1920
class Config:
2021
case_sensitive = True

create_fastapi_project/templates/langchain_basic/app/app/templates/chat.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
</head>
77
<body>
88
<webchat-widget
9-
widget-websocket="ws://localhost:8000/api/v1/chat"
9+
widget-websocket="ws://localhost:8000/api/v1/chat/1"
1010
widget-color="#47A7F6"
1111
widget-chat-avatar="https://icon-library.com/images/ai-icon/ai-icon-7.jpg"
1212
widget-user-avatar="https://img2.freepng.es/20210721/osx/transparent-jedi-avatar-60f79d68da49c0.1144258416268404248941.jpg"

create_fastapi_project/templates/langchain_basic/app/app/utils/adaptive_cards/cards.py

Lines changed: 23 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import adaptive_cards.card_types as types
22
from adaptive_cards.card import AdaptiveCard
33
from adaptive_cards.elements import TextBlock, Image, Media
4-
from adaptive_cards.containers import Container
4+
from adaptive_cards.containers import Container, ImageSet
55
from adaptive_cards.actions import ActionSubmit
66
import re
77

@@ -11,12 +11,17 @@
1111
def custom_media(anwser):
1212
regex_http = r'https?://[^\s"]+'
1313
url_search = re.search(regex_http, anwser)
14+
url_search_all = re.findall(regex_http, anwser)
1415

1516
regex_image = r"\b(https?|ftp):\/\/[^\s/$.?#].[^\s]*\.(jpg|jpeg|png|gif|webp)\b"
1617
url_image_search = re.search(regex_image, anwser)
1718

1819
if url_image_search:
1920
url_image = url_search.group()
21+
if url_image.endswith(")"):
22+
url_image = url_image[:-1]
23+
if url_image.endswith(")."):
24+
url_image = url_image[:-2]
2025
media = Image(url=url_image)
2126
return ICreateMediaAC(media_object=media, media_type="image", url=url_image)
2227

@@ -57,6 +62,17 @@ def custom_media(anwser):
5762
media_object=media, media_type="youtube_video", url=url_youtube_video
5863
)
5964

65+
if len(url_search_all) > 0:
66+
list_media_element = []
67+
for photo in url_search_all:
68+
if "https://images.unsplash.com" in photo:
69+
media = Image(url=photo)
70+
list_media_element.append(media)
71+
body_container_images = ImageSet(images=list_media_element)
72+
return ICreateMediaAC(
73+
media_object=body_container_images, media_type="image", url=""
74+
)
75+
6076
return None
6177

6278

@@ -87,9 +103,12 @@ def create_adaptive_card(answer: str, actions: list[str] = []) -> AdaptiveCard:
87103
# answer = answer.replace(custom_media_element.url, "")
88104

89105
description_text = TextBlock(text=answer, wrap=True)
90-
body_container = Container(
91-
items=[description_text, custom_media_item, hidden_video_youtube]
92-
)
106+
items = [
107+
description_text,
108+
custom_media_item,
109+
hidden_video_youtube,
110+
]
111+
body_container = Container(items=items)
93112

94113
# crear action
95114
actions = [ActionSubmit(title=action) for action in actions]

create_fastapi_project/templates/langchain_basic/app/app/utils/callback.py

Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
from app.schemas.message_schema import IChatResponse
22
from app.utils.adaptive_cards.cards import create_adaptive_card, create_image_card
3+
from app.utils.chains import get_suggestions_questions
34
from langchain.callbacks.base import AsyncCallbackHandler
45
from app.utils.uuid6 import uuid7
56
from fastapi import WebSocket
@@ -129,3 +130,161 @@ async def on_llm_end(
129130
type="end",
130131
)
131132
await self.websocket.send_json(resp.dict())
133+
134+
135+
class CustomFinalStreamingStdOutCallbackHandler(AsyncCallbackHandler):
    """Callback handler for streaming in agents.

    Only works with agents using LLMs that support streaming.

    Only the final output of the agent will be streamed: tokens are
    buffered until the answer-prefix token sequence (e.g. "Final Answer:")
    is observed, and everything after it is pushed to the websocket as
    incrementally updated adaptive cards.
    """

    def __init__(
        self,
        websocket: WebSocket,
        *,
        message_id: str | None = None,
        answer_prefix_tokens: list[str] | None = None,
        strip_tokens: bool = True,
        stream_prefix: bool = False,
    ) -> None:
        """Instantiate FinalStreamingStdOutCallbackHandler.

        Args:
            websocket: Open connection used to push streamed responses.
            message_id: Identifier attached to every outgoing frame. A fresh
                uuid7 is generated per instance when omitted.
            answer_prefix_tokens: Token sequence that prefixes the answer.
                Default is DEFAULT_ANSWER_PREFIX_TOKENS.
            strip_tokens: Ignore white spaces and new lines when comparing
                answer_prefix_tokens to last tokens? (to determine if answer
                has been reached)
            stream_prefix: Should answer prefix itself also be streamed?
        """
        self.websocket: WebSocket = websocket
        # BUG FIX: the original signature used `message_id: str = str(uuid7())`,
        # which evaluates uuid7() once at function-definition time, so every
        # handler created without an explicit id shared the same value.
        self.message_id: str = message_id if message_id is not None else str(uuid7())
        self.text: str = ""
        self.started: bool = False
        # Placeholder "typing" animation shown until real content arrives.
        self.loading_card = create_image_card(
            "https://res.cloudinary.com/dnv0qwkrk/image/upload/v1691005682/Alita/Ellipsis-2.4s-81px_1_nja8hq.gif"
        )
        self.adaptive_card = self.loading_card

        if answer_prefix_tokens is None:
            self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
        else:
            self.answer_prefix_tokens = answer_prefix_tokens
        if strip_tokens:
            self.answer_prefix_tokens_stripped = [
                token.strip() for token in self.answer_prefix_tokens
            ]
        else:
            self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
        # Rolling window of the most recent tokens; compared against the
        # prefix sequence to detect the start of the final answer.
        self.last_tokens = [""] * len(self.answer_prefix_tokens)
        self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
        self.strip_tokens = strip_tokens
        self.stream_prefix = stream_prefix
        self.answer_reached = False

    def append_to_last_tokens(self, token: str) -> None:
        """Push *token* into the rolling window (raw and stripped forms)."""
        self.last_tokens.append(token)
        self.last_tokens_stripped.append(token.strip())
        if len(self.last_tokens) > len(self.answer_prefix_tokens):
            self.last_tokens.pop(0)
            self.last_tokens_stripped.pop(0)

    def check_if_answer_reached(self) -> bool:
        """Return True when the rolling window equals the answer prefix."""
        if self.strip_tokens:
            return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
        else:
            return self.last_tokens == self.answer_prefix_tokens

    def update_message_id(self, message_id: str | None = None) -> None:
        """Point subsequent frames at a new message id (fresh uuid7 by default).

        BUG FIX: the original default `str(uuid7())` was evaluated once at
        definition time, so repeated no-arg calls reused the same id.
        """
        self.message_id = message_id if message_id is not None else str(uuid7())

    async def on_llm_start(
        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
    ) -> None:
        """Run when LLM starts running.

        Sends the loading card exactly once per agent run ("start" frame).
        """
        if not self.started:
            self.started = True
            resp = IChatResponse(
                id="",
                message_id=self.message_id,
                sender="bot",
                message=self.loading_card.to_dict(),
                type="start",
            )
            await self.websocket.send_json(resp.dict())

    async def on_agent_finish(
        self,
        finish: AgentFinish,
        *,
        run_id: UUID,
        parent_run_id: UUID | None = None,
        **kwargs: Any,
    ) -> Any:
        """Run on agent end.

        Sends the full answer, optionally followed by an "end" frame that
        carries suggested follow-up questions, then resets per-run state.
        """
        print("#" * 100)
        print("finish")
        print(finish.return_values["output"])
        print("#" * 100)

        # Fall back to the agent's return value when nothing was streamed
        # (e.g. the answer prefix was never detected).
        message: str = (
            self.text
            if self.text != ""
            # else "😕 Lo siento no he podido hallar lo que buscabas"
            else finish.return_values["output"]
        )
        self.adaptive_card = create_adaptive_card(message)

        resp = IChatResponse(
            id="",
            message_id=self.message_id,
            sender="bot",
            message=self.adaptive_card.to_dict(),
            type="stream",
        )
        await self.websocket.send_json(resp.dict())

        suggested_responses = await get_suggestions_questions(message)
        if len(suggested_responses) > 0:
            self.adaptive_card = create_adaptive_card(
                answer=message,
            )
            medium_resp = IChatResponse(
                id="",
                message_id=self.message_id,
                sender="bot",
                message=self.adaptive_card.to_dict(),
                type="end",
                suggested_responses=suggested_responses,
            )
            await self.websocket.send_json(medium_resp.dict())

        # Reset values
        self.text = ""
        self.answer_reached = False
        self.started = False

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        # Remember the last n tokens, where n = len(answer_prefix_tokens)
        self.append_to_last_tokens(token)

        # Check if the last n tokens match the answer_prefix_tokens list ...
        if self.check_if_answer_reached():
            self.answer_reached = True
            return

        # ... if yes, then stream every following token as a growing card
        if self.answer_reached:
            self.text += f"{token}"
            self.adaptive_card = create_adaptive_card(self.text)

            resp = IChatResponse(
                id="",
                message_id=self.message_id,
                sender="bot",
                message=self.adaptive_card.to_dict(),
                type="stream",
            )
            await self.websocket.send_json(resp.dict())
Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
from langchain import LLMChain, PromptTemplate
2+
from langchain.chat_models import ChatOpenAI
3+
4+
import re
5+
6+
7+
async def get_suggestions_questions(input: str) -> list[str]:
    """Return up to three suggested follow-up questions for *input*.

    First asks the model whether the topic is a farewell; when it is not,
    a second chain generates suggestions, which are extracted from the
    numbered list in the model's reply.
    """
    model = ChatOpenAI(
        streaming=True,
        temperature=0,
    )

    farewell_prompt = PromptTemplate(
        input_variables=["input"],
        template="Determinate if the '{input}' is related to the topic of farewell and return True or False",
    )
    suggestions_prompt = PromptTemplate(
        input_variables=["input"],
        template="Create three good suggestions questions about this topic of: {input}. Return the suggestions like a list.",
    )

    farewell_verdict = await LLMChain(llm=model, prompt=farewell_prompt).arun(input)

    # Farewell topics get no suggestions.
    if "False" not in farewell_verdict:
        return []

    raw_suggestions = await LLMChain(llm=model, prompt=suggestions_prompt).arun(input)
    # Pull out lines shaped like "1. Some question?" and cap at three.
    return re.findall(r"\d+\.\s(.*?\?)", raw_suggestions)[:3]

0 commit comments

Comments
 (0)
0