ready for "stopped by user" feature · ag-python-qt/pyqt-openai@cdd4574 · GitHub

Commit cdd4574

ready for "stopped by user" feature
1 parent d39de57 commit cdd4574

File tree: 2 files changed (+34, -8 lines)

pyqt_openai/openAiThread.py

Lines changed: 22 additions & 7 deletions
@@ -24,6 +24,10 @@ def __init__(self, model, openai_arg, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.__endpoint = getModelEndpoint(model)
         self.__openai_arg = openai_arg
+        self.__stop_streaming = False
+
+    def stop_streaming(self):
+        self.__stop_streaming = True
 
     def run(self):
         try:
@@ -34,13 +38,17 @@ def run(self):
             # if it is streaming, type will be generator
             if inspect.isgenerator(response):
                 for chunk in response:
-                    delta = chunk['choices'][0]['delta']
-                    response_text = delta.get('content', '')
-                    if response_text:
-                        self.replyGenerated.emit(response_text, False, True, '')
-                    else:
-                        finish_reason = chunk['choices'][0].get('finish_reason', '')
+                    if self.__stop_streaming:
+                        finish_reason = chunk['choices'][0].get('finish_reason', 'stopped by user')
                         self.streamFinished.emit(finish_reason)
+                    else:
+                        delta = chunk['choices'][0]['delta']
+                        response_text = delta.get('content', '')
+                        if response_text:
+                            self.replyGenerated.emit(response_text, False, True, '')
+                        else:
+                            finish_reason = chunk['choices'][0].get('finish_reason', '')
+                            self.streamFinished.emit(finish_reason)
             else:
                 response_text = response['choices'][0]['message']['content']
                 finish_reason = response['choices'][0]['finish_reason']
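
For reference, a sketch of the chunk shape the streaming loop above relies on. The pre-v1 openai Python SDK yields dict-like objects when stream=True; the literal values below are illustrative, not taken from the commit:

    # Illustrative shape of one streaming chunk (pre-v1 openai SDK).
    chunk = {
        'choices': [{
            'delta': {'content': 'Hel'},   # empty dict on the final chunk
            'finish_reason': None,         # e.g. 'stop' once the stream ends
            'index': 0,
        }]
    }

    delta = chunk['choices'][0]['delta']
    response_text = delta.get('content', '')                      # '' at end of stream
    finish_reason = chunk['choices'][0].get('finish_reason', '')  # set only at the end

Because the stop flag is polled once per chunk, a user-initiated stop takes effect when the next chunk arrives rather than instantly.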
@@ -60,6 +68,10 @@ def __init__(self, llama_idx_instance, openai_arg, query_text, *args, **kwargs):
         self.__llama_idx_instance = llama_idx_instance
         self.__openai_arg = openai_arg
         self.__query_text = query_text
+        self.__stop_streaming = False
+
+    def stop_streaming(self):
+        self.__stop_streaming = True
 
     def run(self):
         try:
@@ -68,7 +80,10 @@ def run(self):
             f = isinstance(resp, StreamingResponse)
             if f:
                 for response_text in resp.response_gen:
-                    self.replyGenerated.emit(response_text, False, f, '')
+                    if self.__stop_streaming:
+                        pass
+                    else:
+                        self.replyGenerated.emit(response_text, False, f, 'stopped by user')
                 self.streamFinished.emit('')
             else:
                 self.replyGenerated.emit(resp.response, False, f, '')
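
A minimal sketch of how the new stop_streaming() slot might be driven from the GUI side. Only OpenAIThread.stop_streaming() comes from this commit; the widget, button, and names below are hypothetical, and PyQt5 is assumed:

    # Hypothetical wiring for a stop button; only stop_streaming() is real.
    from PyQt5.QtWidgets import QPushButton, QVBoxLayout, QWidget

    class ChatPage(QWidget):                 # hypothetical container widget
        def __init__(self, openai_thread):
            super().__init__()
            self.__thread = openai_thread    # an OpenAIThread instance
            stop_btn = QPushButton('Stop', self)
            # Flipping a bool from the GUI thread is adequate here because
            # the worker only polls the flag once per streamed chunk.
            stop_btn.clicked.connect(self.__thread.stop_streaming)
            layout = QVBoxLayout(self)
            layout.addWidget(stop_btn)

Note that LlamaOpenAIThread handles the flag differently: once __stop_streaming is set it merely skips the emit with pass, so the loop still drains resp.response_gen to the end before streamFinished fires.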

pyqt_openai/util/llamapage_script.py

Lines changed: 12 additions & 1 deletion
@@ -37,14 +37,25 @@ def set_similarity_top_k(self, similarity_top_k):
 
     def __init_engine(self):
         try:
+            query_engine_streaming = self.__openai_arg['stream']
+
+            keys_to_keep = ['model', 'temperature']
+
+            # Create a new dictionary with the desired keys
+            filtered_dict = {key: self.__openai_arg[key] for key in keys_to_keep}
+
+            # If you want to modify the original dictionary in-place, you can use this:
+            self.__openai_arg.clear()
+            self.__openai_arg.update(filtered_dict)
+
             llm_predictor = LLMPredictor(llm=ChatOpenAI(**self.__openai_arg, streaming=True))
             service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, chunk_size_limit=self.__chunk_size_limit)
             index = GPTVectorStoreIndex.from_documents(self.__documents, service_context=service_context)
 
             self.__query_engine = index.as_query_engine(
                 service_context=service_context,
                 similarity_top_k=self.__similarity_top_k,
-                streaming=self.__openai_arg['stream']
+                streaming=query_engine_streaming
             )
         except Exception as e:
             raise Exception
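
The filtering step above presumably exists because LangChain's ChatOpenAI takes streaming as its own constructor flag rather than accepting the raw OpenAI 'stream' key, while as_query_engine() still needs that value. A standalone sketch of the pattern; the dict contents here are hypothetical:

    # Standalone sketch of the key-filtering step (hypothetical values).
    openai_arg = {'model': 'gpt-3.5-turbo', 'temperature': 0.7, 'stream': True}

    query_engine_streaming = openai_arg['stream']   # saved before keys are dropped

    keys_to_keep = ['model', 'temperature']
    filtered = {k: openai_arg[k] for k in keys_to_keep if k in openai_arg}

    # In-place variant, matching the commit:
    openai_arg.clear()
    openai_arg.update(filtered)

    assert openai_arg == {'model': 'gpt-3.5-turbo', 'temperature': 0.7}
    assert query_engine_streaming is True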
