8000 implement llamaindex (SimpleDirectoryReader) successfully · ag-python-qt/pyqt-openai@7663c82 · GitHub
[go: up one dir, main page]

Skip to content

Commit 7663c82

Browse files
committed
implement llamaindex (SimpleDirectoryReader) successfully
1 parent 07bd8d7 commit 7663c82

File tree

3 files changed

+25
-18
lines changed

3 files changed

+25
-18
lines changed

pyqt_openai/openAiChatBotWidget.py

Lines changed: 13 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
from pyqt_openai.chat_widget.prompt import Prompt
1212
from pyqt_openai.leftSideBar import LeftSideBar
1313
from pyqt_openai.notifier import NotifierWidget
14-
from pyqt_openai.openAiThread import OpenAIThread
14+
from pyqt_openai.openAiThread import OpenAIThread, LlamaOpenAIThread
1515
from pyqt_openai.prompt_gen_widget.promptGeneratorWidget import PromptGeneratorWidget
1616
from pyqt_openai.right_sidebar.aiPlaygroundWidget import AIPlaygroundWidget
1717
from pyqt_openai.util.llamapage_script import GPTLLamaIndexClass
@@ -193,11 +193,12 @@ def __chat(self):
193193
'stream': stream,
194194
}
195195

196+
is_llama_available = self.__llama_class.get_directory() and use_llama_index
196197
# check llamaindex is available
197-
if self.__llama_class.get_directory() and use_llama_index:
198+
if is_llama_available:
198199
del openai_arg['messages']
199-
use_max_token = self.__settings_ini.value('use_max_token', type=bool)
200-
if use_max_token:
200+
use_max_tokens = self.__settings_ini.value('use_max_tokens', type=bool)
201+
if use_max_tokens:
201202
openai_arg['max_tokens'] = max_tokens
202203

203204
if self.__leftSideBarWidget.isCurrentConvExists():
@@ -208,12 +209,17 @@ def __chat(self):
208209
self.__lineEdit.setEnabled(False)
209210
self.__leftSideBarWidget.setEnabled(False)
210211

211-
self.__browser.showLabel(self.__prompt.getContent(), True, False)
212+
query_text = self.__prompt.getContent()
212213

213-
self.__t = OpenAIThread(model, openai_arg)
214+
self.__browser.showLabel(query_text, True, False)
215+
self.__lineEdit.clear()
216+
217+
if is_llama_available:
218+
self.__t = LlamaOpenAIThread(self.__llama_class, openai_arg=openai_arg, query_text=query_text)
219+
else:
220+
self.__t = OpenAIThread(model, openai_arg)
214221
self.__t.replyGenerated.connect(self.__browser.showLabel)
215222
self.__t.streamFinished.connect(self.__browser.streamFinished)
216-
self.__lineEdit.clear()
217223
self.__t.start()
218224
self.__t.finished.connect(self.__afterGenerated)
219225
except Exception as e:

pyqt_openai/openAiThread.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,6 @@ class OpenAIThread(QThread):
2121

2222
def __init__(self, model, openai_arg, *args, **kwargs):
2323
super().__init__(*args, **kwargs)
24-
self.__model = model
2524
self.__endpoint = getModelEndpoint(model)
2625
self.__openai_arg = openai_arg
2726

@@ -55,18 +54,21 @@ class LlamaOpenAIThread(QThread):
5554
replyGenerated = Signal(str, bool, bool)
5655
streamFinished = Signal()
5756

58-
def __init__(self, llama_idx_instance, query_text, *args, **kwargs):
57+
def __init__(self, llama_idx_instance, openai_arg, query_text, *args, **kwargs):
5958
super().__init__(*args, **kwargs)
6059
self.__llama_idx_instance = llama_idx_instance
60+
self.__openai_arg = openai_arg
6161
self.__query_text = query_text
6262

6363
def run(self):
6464
try:
65-
resp = self.__llama_idx_instance.getResponse(self.__query_text)
65+
self.__llama_idx_instance.set_openai_arg(**self.__openai_arg)
66+
resp = self.__llama_idx_instance.get_response(self.__query_text)
6667
f = isinstance(resp, StreamingResponse)
6768
if f:
6869
for response_text in resp.response_gen:
6970
self.replyGenerated.emit(response_text, False, f)
71+
self.streamFinished.emit()
7072
else:
7173
self.replyGenerated.emit(resp.response, False, f)
7274
except openai.error.InvalidRequestError as e:

pyqt_openai/util/llamapage_script.py

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,8 @@
33
from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader, LLMPredictor, ServiceContext
44
from langchain.chat_models import ChatOpenAI
55

6-
openai.api_key = 'My_key'
7-
os.environ['OPENAI_API_KEY'] = 'My_key'
6+
openai.api_key = 'sk-***REDACTED-LEAKED-KEY***'  # SECURITY: a live-format OpenAI secret key was committed here; redacted in this record — revoke/rotate the key and load it from the environment instead
7+
os.environ['OPENAI_API_KEY'] = 'sk-***REDACTED-LEAKED-KEY***'  # SECURITY: same leaked key; redacted — never hardcode credentials in source
88
# this app will set api key to environment variable and save it in openai_ini.ini
99
# openai_ini.ini will be generated if api key you entered is valid
1010

@@ -24,26 +24,26 @@ def __initVal(self) -> None:
2424

2525
def set_directory(self, directory):
2626
self.__directory = directory
27+
self.__documents = SimpleDirectoryReader(self.__directory).load_data()
2728

2829
def get_directory(self):
2930
return self.__directory
3031

3132
def set_openai_arg(self, **args):
3233
self.__openai_arg = args
34+
self.__init_engine()
3335

3436
def set_chunk_size_limit(self, chunk_size_limit):
3537
self.__chunk_size_limit = chunk_size_limit
3638

3739
def set_similarity_top_k(self, similarity_top_k):
3840
self.__similarity_top_k = similarity_top_k
3941

40-
def init_engine(self):
42+
def __init_engine(self):
4143
try:
42-
documents = SimpleDirectoryReader(self.__directory).load_data()
43-
4444
llm_predictor = LLMPredictor(llm=ChatOpenAI(**self.__openai_arg, streaming=True))
4545
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size_limit=self.__chunk_size_limit)
46-
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
46+
index = GPTVectorStoreIndex.from_documents(self.__documents, service_context=service_context)
4747

4848
self.__query_engine = index.as_query_engine(
4949
service_context=service_context,
@@ -60,7 +60,7 @@ def get_response(self, text):
6060

6161
return response
6262

63-
#
63+
6464
# openai_arg = {
6565
# 'model': 'gpt-3.5-turbo',
6666
# 'temperature': 0.7,
@@ -73,7 +73,6 @@ def get_response(self, text):
7373
# c = GPTLLamaIndexClass()
7474
# c.set_directory('./llama_example')
7575
# c.set_openai_arg(**openai_arg)
76-
# c.init_engine()
7776
#
7877
# response = c.get_response(
7978
# "Hello, who is yjg30737 and what language is he good at?",

0 commit comments

Comments (0)
0