add thread for LlamaIndex · ag-python-qt/pyqt-openai@07bd8d7

Commit 07bd8d7

add thread for LlamaIndex

1 parent ae7fd7a · commit 07bd8d7

File tree: 1 file changed (+28 −11 lines)

pyqt_openai/openAiThread.py (28 additions & 11 deletions)
@@ -2,6 +2,7 @@
 import json
 
 import openai
+from llama_index.response.schema import StreamingResponse
 
 from qtpy.QtCore import QThread, Signal
 
@@ -13,15 +14,14 @@ class OpenAIThread(QThread):
     == replyGenerated Signal ==
     First: response
     Second: user or AI
-    Third: streaming a chat completion or not
+    Third: streaming or not streaming
     """
     replyGenerated = Signal(str, bool, bool)
     streamFinished = Signal()
 
     def __init__(self, model, openai_arg, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.__model = model
-        print(model)
         self.__endpoint = getModelEndpoint(model)
         self.__openai_arg = openai_arg
 
@@ -45,15 +45,32 @@ def run(self):
                 else:
                     response_text = response['choices'][0]['message']['content']
                     self.replyGenerated.emit(response_text, False, False)
-            elif self.__endpoint == '/v1/completions':
-                openai_object = openai.Completion.create(
-                    **self.__openai_arg
-                )
-
-                response_text = openai_object['choices'][0]['text'].strip()
-                self.replyGenerated.emit(response_text, False, False)
         except openai.error.InvalidRequestError as e:
-            print(e)
             self.replyGenerated.emit(f'<p style="color:red">{e}</p>', False, False)
         except openai.error.RateLimitError as e:
-            self.replyGenerated.emit(f'<p style="color:red">{e}<br/>Check the usage: https://platform.openai.com/account/usage<br/>Update to paid account: https://platform.openai.com/account/billing/overview', False, False)
+            self.replyGenerated.emit(f'<p style="color:red">{e}<br/>Check the usage: https://platform.openai.com/account/usage<br/>Update to paid account: https://platform.openai.com/account/billing/overview', False, False)
+
+
+class LlamaOpenAIThread(QThread):
+    replyGenerated = Signal(str, bool, bool)
+    streamFinished = Signal()
+
+    def __init__(self, llama_idx_instance, query_text, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.__llama_idx_instance = llama_idx_instance
+        self.__query_text = query_text
+
+    def run(self):
+        try:
+            resp = self.__llama_idx_instance.getResponse(self.__query_text)
+            f = isinstance(resp, StreamingResponse)
+            if f:
+                for response_text in resp.response_gen:
+                    self.replyGenerated.emit(response_text, False, f)
+            else:
+                self.replyGenerated.emit(resp.response, False, f)
+        except openai.error.InvalidRequestError as e:
+            self.replyGenerated.emit('<p style="color:red">Your request was rejected as a result of our safety system.<br/>'
+                                     'Your prompt may contain text that is not allowed by our safety system.</p>', False, False)
+        except openai.error.RateLimitError as e:
+            self.replyGenerated.emit(f'<p style="color:red">{e}<br/>Check the usage: https://platform.openai.com/account/usage<br/>Update to paid account: https://platform.openai.com/account/billing/overview', False, False)
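
For context, here is a minimal usage sketch (not part of this commit) of how the new LlamaOpenAIThread might be driven. `LlamaIndexWrapper` and its `getResponse()` are hypothetical stand-ins for the app's LlamaIndex wrapper object; the sketch assumes the pre-0.10 llama_index API, where a query engine built with streaming=True returns the StreamingResponse that run() checks for above.

import sys

from qtpy.QtCore import QCoreApplication
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader

from pyqt_openai.openAiThread import LlamaOpenAIThread


class LlamaIndexWrapper:
    """Hypothetical stand-in for the app's LlamaIndex wrapper object."""
    def __init__(self, directory):
        docs = SimpleDirectoryReader(directory).load_data()
        index = GPTVectorStoreIndex.from_documents(docs)
        # streaming=True makes query() return a StreamingResponse,
        # which LlamaOpenAIThread.run() detects via isinstance().
        self.__engine = index.as_query_engine(streaming=True)

    def getResponse(self, query_text):
        return self.__engine.query(query_text)


def onReply(text, is_user, is_streaming):
    # Streamed chunks arrive piece by piece; a non-streamed answer arrives once.
    print(text, end='' if is_streaming else '\n')


app = QCoreApplication(sys.argv)
wrapper = LlamaIndexWrapper('./docs')
thread = LlamaOpenAIThread(wrapper, 'Summarize these documents.')
thread.replyGenerated.connect(onReply)
thread.finished.connect(app.quit)  # QThread.finished fires when run() returns
thread.start()
sys.exit(app.exec_())

Running the query on a QThread keeps the potentially slow LlamaIndex call off the GUI thread, and cross-thread signal delivery makes it safe for the connected slot to update widgets.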
