@@ -17,8 +17,8 @@
 from openai import OpenAI
 from g4f.client import Client
 
-from pyqt_openai import STT_MODEL, OPENAI_ENDPOINT_DICT, PLATFORM_MODEL_DICT, DEFAULT_GEMINI_MODEL, LLAMA_REQUEST_URL, \
-    OPENAI_CHAT_ENDPOINT, O1_MODELS
+from pyqt_openai import STT_MODEL, OPENAI_ENDPOINT_DICT, PROVIDER_MODEL_DICT, DEFAULT_GEMINI_MODEL, LLAMA_REQUEST_URL, \
+    OPENAI_CHAT_ENDPOINT, O1_MODELS, G4F_MODELS
 from pyqt_openai.config_loader import CONFIG_MANAGER
 from pyqt_openai.lang.translations import LangClass
 from pyqt_openai.models import ChatMessageContainer
@@ -58,9 +58,12 @@ def get_model_endpoint(model):
 def get_openai_chat_model():
     return OPENAI_ENDPOINT_DICT[OPENAI_CHAT_ENDPOINT]
 
-def get_chat_model():
-    all_models = [model for models in PLATFORM_MODEL_DICT.values() for model in models]
-    return all_models
+def get_chat_model(is_g4f=False):
+    if is_g4f:
+        return G4F_MODELS
+    else:
+        all_models = [model for models in PROVIDER_MODEL_DICT.values() for model in models]
+        return all_models
 
 def get_image_url_from_local(image):
     """
@@ -136,44 +139,55 @@ def get_gpt_argument(model, system, messages, cur_text, temperature, top_p, freq
         print(e)
         raise e
 
-# Check which platform a specific model belongs to
-def get_platform_from_model(model):
-    for platform, models in PLATFORM_MODEL_DICT.items():
+# Check which provider a specific model belongs to
+def get_provider_from_model(model):
+    for provider, models in PROVIDER_MODEL_DICT.items():
         if model in models:
-            return platform
+            return provider
     return None
 
-def get_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
-                 use_max_tokens, max_tokens,
-                 images,
-                 is_llama_available=False, is_json_response_available=0,
-                 json_content=None
-                 ):
-    try:
-        platform = get_platform_from_model(model)
-        if platform == 'OpenAI':
-            args = get_gpt_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
+def get_g4f_argument(model, messages, cur_text, stream):
+    args = {
+        'model': model,
+        'messages': messages,
+        'stream': stream
+    }
+    args['messages'].append({"role": "user", "content": cur_text})
+    return args
+
+def get_api_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
                  use_max_tokens, max_tokens,
                  images,
-                 is_llama_available=is_llama_available, is_json_response_available=is_json_response_available,
-                 json_content=json_content
-                 )
-        elif platform == 'Gemini':
+                 is_llama_available=False, is_json_response_available=0,
+                 json_content=None
+                 ):
+    try:
+        provider = get_provider_from_model(model)
+        if provider == 'OpenAI':
+            args = get_gpt_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty,
+                                    presence_penalty, stream,
+                                    use_max_tokens, max_tokens,
+                                    images,
+                                    is_llama_available=is_llama_available,
+                                    is_json_response_available=is_json_response_available,
+                                    json_content=json_content
+                                    )
+        elif provider == 'Gemini':
             args = {
                 'model': model,
                 'messages': messages,
                 'stream': stream
             }
             args['messages'].append({"role": "user", "content": cur_text})
-        elif platform == 'Claude':
+        elif provider == 'Claude':
             args = {
                 'model': model,
                 'messages': messages,
                 'max_tokens': 1024,
                 'stream': stream
             }
             args['messages'].append({"role": "user", "content": cur_text})
-        elif platform == 'Llama':
+        elif provider == 'Llama':
             args = {
                 'model': model,
                 'messages': messages,
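
For reference, `get_g4f_argument` packages the prompt into an OpenAI-style payload and, like the Gemini/Claude/Llama branches, appends the new user turn to the caller's `messages` list in place. A small sketch of the resulting shape (the history and prompt here are made-up examples):

def get_g4f_argument(model, messages, cur_text, stream):
    args = {
        'model': model,
        'messages': messages,
        'stream': stream
    }
    args['messages'].append({"role": "user", "content": cur_text})
    return args

history = [{"role": "system", "content": "You are a helpful assistant."}]
args = get_g4f_argument('gpt-4o-mini', history, 'Hello!', stream=False)
assert args['messages'][-1] == {"role": "user", "content": "Hello!"}
assert history is args['messages']  # the input list is mutated, not copied
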
@@ -182,49 +196,70 @@ def get_argument(model, system, messages, cur_text, temperature, top_p, frequenc
             }
             args['messages'].append({"role": "user", "content": cur_text})
         else:
-            raise Exception(f'Platform not found for model {model}')
+            raise Exception(f'Provider not found for model {model}')
+        return args
+    except Exception as e:
+        print(e)
+        raise e
+
+def get_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
+                 use_max_tokens, max_tokens,
+                 images,
+                 is_llama_available=False, is_json_response_available=0,
+                 json_content=None,
+                 is_g4f=False
+                 ):
+    try:
+        if is_g4f:
+            args = get_g4f_argument(model, messages, cur_text, stream)
+        else:
+            args = get_api_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
+                                    use_max_tokens, max_tokens,
+                                    images,
+                                    is_llama_available=is_llama_available, is_json_response_available=is_json_response_available,
+                                    json_content=json_content)
         return args
     except Exception as e:
         print(e)
         raise e
 
-def stream_response(platform, response, is_g4f=False):
+def stream_response(provider, response, is_g4f=False):
     if is_g4f:
         for chunk in response:
             yield chunk.choices[0].delta.content
     else:
-        if platform == 'OpenAI':
+        if provider == 'OpenAI':
             for chunk in response:
                 response_text = chunk.choices[0].delta.content
                 yield response_text
-        elif platform == 'Gemini':
+        elif provider == 'Gemini':
             for chunk in response:
                 yield chunk.text
-        elif platform == 'Claude':
+        elif provider == 'Claude':
             with response as stream:
                 for text in stream.text_stream:
                     yield text
-        elif platform == 'Llama':
+        elif provider == 'Llama':
             for chunk in response:
                 response_text = chunk.choices[0].delta.content
                 yield response_text
 
 def get_api_response(args, get_content_only=True):
-    platform = get_platform_from_model(args['model'])
-    if platform == 'OpenAI':
+    provider = get_provider_from_model(args['model'])
+    if provider == 'OpenAI':
         response = OPENAI_CLIENT.chat.completions.create(
             **args
         )
         if args['stream']:
-            return stream_response(platform, response)
+            return stream_response(provider, response)
         else:
             if get_content_only:
                 if args['model'] in O1_MODELS:
                     return str(response.choices[0].message.content)
                 return response.choices[0].message.content
             else:
                 return response
-    elif platform == 'Gemini':
+    elif provider == 'Gemini':
         # Change 'content' to 'parts'
         # Change role's value from 'assistant' to 'model'
         for message in args['messages']:
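
Whatever the backend, `stream_response` yields plain text chunks, so the caller can consume every provider the same way. A sketch of the g4f branch only, with fake OpenAI-style chunks standing in for a real streamed response:

from types import SimpleNamespace

def fake_chunks():
    # Mimic the chunk shape the code reads: chunk.choices[0].delta.content
    for text in ('Hel', 'lo'):
        yield SimpleNamespace(choices=[SimpleNamespace(delta=SimpleNamespace(content=text))])

def stream_response(provider, response, is_g4f=False):
    # Pared down to the g4f branch from the diff above
    if is_g4f:
        for chunk in response:
            yield chunk.choices[0].delta.content

assert ''.join(stream_response(provider='', response=fake_chunks(), is_g4f=True)) == 'Hello'
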
@@ -238,21 +273,21 @@ def get_api_response(args, get_content_only=True):
 
         if args['stream']:
             response = chat.send_message(args['messages'][-1]['parts'], stream=args['stream'])
-            return stream_response(platform, response)
+            return stream_response(provider, response)
         else:
             response = chat.send_message(args['messages'][-1]['parts'])
             if get_content_only:
                 return response.text
             else:
                 return response
-    elif platform == 'Claude':
+    elif provider == 'Claude':
         if args['stream']:
             response = CLAUDE_CLIENT.messages.stream(
                 model=args['model'],
                 max_tokens=1024,
                 messages=args['messages']
             )
-            return stream_response(platform, response)
+            return stream_response(provider, response)
         else:
             response = CLAUDE_CLIENT.messages.create(
                 model=args['model'],
@@ -263,12 +298,12 @@ def get_api_response(args, get_content_only=True):
                 return response.content[0].text
             else:
                 return response
-    elif platform == 'Llama':
+    elif provider == 'Llama':
         response = LLAMA_CLIENT.chat.completions.create(
             **args
         )
         if args['stream']:
-            return stream_response(platform, response)
+            return stream_response(provider, response)
         else:
             if get_content_only:
                 return response.choices[0].message.content
@@ -282,7 +317,7 @@ def get_g4f_response(args, get_content_only=True):
         messages=args['messages'],
     )
     if args['stream']:
-        return stream_response(platform='', response=response, is_g4f=True)
+        return stream_response(provider='', response=response, is_g4f=True)
     else:
         if get_content_only:
             return response.choices[0].message.content
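
Taken together, the g4f path is: `get_argument(..., is_g4f=True)` builds the payload, `get_g4f_response` sends it through the g4f `Client` (imported at the top of the file), and `stream_response(..., is_g4f=True)` unwraps the chunks. A hedged end-to-end sketch against the g4f client directly; the model id is an assumption, and in practice it would come from `G4F_MODELS`:

from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model='gpt-4o-mini',  # assumed id, not taken from this diff
    messages=[{"role": "user", "content": "Say hi in five words."}],
    stream=True,
)
for chunk in response:
    content = chunk.choices[0].delta.content
    if content:  # deltas can be empty/None at stream boundaries
        print(content, end='')
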