@@ -24,6 +24,10 @@ def __init__(self, model, openai_arg, *args, **kwargs):
24
24
super ().__init__ (* args , ** kwargs )
25
25
self .__endpoint = getModelEndpoint (model )
26
26
self .__openai_arg = openai_arg
27
+ self .__stop_streaming = False
28
+
29
def stop_streaming(self):
    """Request that run()'s streaming loop stop emitting further chunks.

    Sets a one-way flag that the per-chunk loop in run() polls;
    presumably called from the GUI thread while this worker streams
    — TODO confirm caller.
    """
    self.__stop_streaming = True
27
31
28
32
def run (self ):
29
33
try :
@@ -34,13 +38,17 @@ def run(self):
34
38
# if it is streaming, type will be generator
35
39
if inspect .isgenerator (response ):
36
40
for chunk in response :
37
- delta = chunk ['choices' ][0 ]['delta' ]
38
- response_text = delta .get ('content' , '' )
39
- if response_text :
40
- self .replyGenerated .emit (response_text , False , True , '' )
41
- else :
42
- finish_reason = chunk ['choices' ][0 ].get ('finish_reason' , '' )
41
+ if self .__stop_streaming :
42
+ finish_reason = chunk ['choices' ][0 ].get ('finish_reason' , 'stopped by user' )
43
43
self .streamFinished .emit (finish_reason )
44
+ else :
45
+ delta = chunk ['choices' ][0 ]['delta' ]
46
+ response_text = delta .get ('content' , '' )
47
+ if response_text :
48
+ self .replyGenerated .emit (response_text , False , True , '' )
49
+ else :
50
+ finish_reason = chunk ['choices' ][0 ].get ('finish_reason' , '' )
51
+ self .streamFinished .emit (finish_reason )
44
52
else :
45
53
response_text = response ['choices' ][0 ]['message' ]['content' ]
46
54
finish_reason = response ['choices' ][0 ]['finish_reason' ]
@@ -60,6 +68,10 @@ def __init__(self, llama_idx_instance, openai_arg, query_text, *args, **kwargs):
60
68
self .__llama_idx_instance = llama_idx_instance
61
69
self .__openai_arg = openai_arg
62
70
self .__query_text = query_text
71
+ self .__stop_streaming = False
72
+
73
def stop_streaming(self):
    """Flag the llama-index streaming loop in run() to stop emitting.

    The response_gen loop checks this flag once per streamed token;
    NOTE(review): the flag only suppresses emits — the generator keeps
    being consumed until it is exhausted.
    """
    self.__stop_streaming = True
63
75
64
76
def run (self ):
65
77
try :
@@ -68,7 +80,10 @@ def run(self):
68
80
f = isinstance (resp , StreamingResponse )
69
81
if f :
70
82
for response_text in resp .response_gen :
71
- self .replyGenerated .emit (response_text , False , f , '' )
83
+ if self .__stop_streaming :
84
+ pass
85
+ else :
86
+ self .replyGenerated .emit (response_text , False , f , 'stopped by user' )
72
87
self .streamFinished .emit ('' )
73
88
else :
74
89
self .replyGenerated .emit (resp .response , False , f , '' )
0 commit comments