Set basic models for g4f · DataSolveProblems/pyqt-openai@1ab8e40 · GitHub

Commit 1ab8e40

Set basic models for g4f

1 parent 2495726 · commit 1ab8e40

File tree: 6 files changed, +87 -47 lines changed

pyqt_openai/__init__.py

Lines changed: 4 additions & 1 deletion

@@ -336,6 +336,9 @@ def move_updater():
 
 STT_MODEL = 'whisper-1'
 
+# G4F Models
+G4F_MODELS = ['gpt-4o', 'gpt-4o-mini', 'gemini-flash', 'claude-3-5-sonnet', 'llama-3.1-70b']
+
 # Endpoint
 # https://platform.openai.com/docs/models/model-endpoint-compatibility
 OPENAI_ENDPOINT_DICT = {
@@ -389,7 +392,7 @@ def move_updater():
 O1_MODELS = ['o1-preview', 'o1-mini']
 
 # Dictionary that stores the platform and model pairs
-PLATFORM_MODEL_DICT = {
+PROVIDER_MODEL_DICT = {
     'OpenAI': ['gpt-4o', 'gpt-4o-mini']+O1_MODELS,
     'Gemini': ['gemini-1.5-flash'],
     'Claude': ['claude-3-5-sonnet-20240620'],
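For reference, the identifiers in the new G4F_MODELS list are handed straight to the g4f client, which mirrors the OpenAI SDK interface and resolves each name to a free provider at request time. A minimal sketch of that usage (the model name is one entry from the new constant; which models actually respond depends on g4f's provider availability):

```python
from g4f.client import Client  # same import this commit uses in globals.py

client = Client()
response = client.chat.completions.create(
    model='gpt-4o-mini',  # any entry from G4F_MODELS
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```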

pyqt_openai/chat_widget/center/chatWidget.py

Lines changed: 2 additions & 1 deletion

@@ -163,7 +163,8 @@ def __chat(self):
                                presence_penalty, stream,
                                use_max_tokens, max_tokens,
                                images,
-                               is_llama_available, is_json_response_available, json_content)
+                               is_llama_available, is_json_response_available, json_content,
+                               self.__is_g4f)
 
         # If there is no current conversation selected on the list to the left, make a new one.
         if self.__mainWidget.currentIndex() == 0:

pyqt_openai/chat_widget/right_sidebar/apiWidget.py

Lines changed: 1 addition & 1 deletion

@@ -45,7 +45,7 @@ def __initVal(self):
     def __initUi(self):
         self.setWindowTitle('API Key')
 
-        columns = ['Platform', 'API Key', 'Get API Key']
+        columns = ['Provider', 'API Key', 'Get API Key']
         self.__tableWidget = QTableWidget()
         self.__tableWidget.setColumnCount(len(columns))
         self.__tableWidget.setHorizontalHeaderLabels(columns)

pyqt_openai/chat_widget/right_sidebar/chatRightSideBarWidget.py

Lines changed: 3 additions & 2 deletions

@@ -33,8 +33,9 @@ def __initUi(self):
         self.__llamaPage = LlamaPage()
         self.__llamaPage.onDirectorySelected.connect(self.__onDirectorySelected)
 
-        tabWidget.addTab(usingG4FPage, 'G4F')
-        tabWidget.addTab(usingAPIPage, 'LLM')
+        # TODO LANGUAGE
+        tabWidget.addTab(usingG4FPage, 'Using G4F (Free)')
+        tabWidget.addTab(usingAPIPage, 'Using API')
         tabWidget.addTab(self.__llamaPage, 'LlamaIndex')
         tabWidget.currentChanged.connect(self.__tabChanged)
         tabWidget.setTabEnabled(2, self.__use_llama_index)

pyqt_openai/chat_widget/right_sidebar/usingG4FPage.py

Lines changed: 1 addition & 1 deletion

@@ -21,7 +21,7 @@ def __initVal(self):
 
     def __initUi(self):
         modelCmbBox = QComboBox()
-        modelCmbBox.addItems(get_chat_model())
+        modelCmbBox.addItems(get_chat_model(is_g4f=True))
         modelCmbBox.setCurrentText(self.__model)
         modelCmbBox.currentTextChanged.connect(self.__modelChanged)

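The same pattern works for any standalone model picker built on the new flag. A minimal self-contained sketch (the Qt binding, PySide6, is an assumption; get_chat_model comes from pyqt_openai/globals.py as changed below):

```python
from PySide6.QtWidgets import QApplication, QComboBox  # assumed binding

from pyqt_openai.globals import get_chat_model

app = QApplication([])
combo = QComboBox()
combo.addItems(get_chat_model(is_g4f=True))  # populated from G4F_MODELS
combo.currentTextChanged.connect(print)      # e.g. persist the chosen model
combo.show()
app.exec()
```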
pyqt_openai/globals.py

Lines changed: 76 additions & 41 deletions

@@ -17,8 +17,8 @@
 from openai import OpenAI
 from g4f.client import Client
 
-from pyqt_openai import STT_MODEL, OPENAI_ENDPOINT_DICT, PLATFORM_MODEL_DICT, DEFAULT_GEMINI_MODEL, LLAMA_REQUEST_URL, \
-    OPENAI_CHAT_ENDPOINT, O1_MODELS
+from pyqt_openai import STT_MODEL, OPENAI_ENDPOINT_DICT, PROVIDER_MODEL_DICT, DEFAULT_GEMINI_MODEL, LLAMA_REQUEST_URL, \
+    OPENAI_CHAT_ENDPOINT, O1_MODELS, G4F_MODELS
 from pyqt_openai.config_loader import CONFIG_MANAGER
 from pyqt_openai.lang.translations import LangClass
 from pyqt_openai.models import ChatMessageContainer
@@ -58,9 +58,12 @@ def get_model_endpoint(model):
 def get_openai_chat_model():
     return OPENAI_ENDPOINT_DICT[OPENAI_CHAT_ENDPOINT]
 
-def get_chat_model():
-    all_models = [model for models in PLATFORM_MODEL_DICT.values() for model in models]
-    return all_models
+def get_chat_model(is_g4f=False):
+    if is_g4f:
+        return G4F_MODELS
+    else:
+        all_models = [model for models in PROVIDER_MODEL_DICT.values() for model in models]
+        return all_models
 
 def get_image_url_from_local(image):
     """
@@ -136,44 +139,55 @@ def get_gpt_argument(model, system, messages, cur_text, temperature, top_p, freq
         print(e)
         raise e
 
-# Check which platform a specific model belongs to
-def get_platform_from_model(model):
-    for platform, models in PLATFORM_MODEL_DICT.items():
+# Check which provider a specific model belongs to
+def get_provider_from_model(model):
+    for provider, models in PROVIDER_MODEL_DICT.items():
         if model in models:
-            return platform
+            return provider
     return None
 
-def get_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
-                 use_max_tokens, max_tokens,
-                 images,
-                 is_llama_available=False, is_json_response_available=0,
-                 json_content=None
-                 ):
-    try:
-        platform = get_platform_from_model(model)
-        if platform == 'OpenAI':
-            args = get_gpt_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
+def get_g4f_argument(model, messages, cur_text, stream):
+    args = {
+        'model': model,
+        'messages': messages,
+        'stream': stream
+    }
+    args['messages'].append({"role": "user", "content": cur_text})
+    return args
+
+def get_api_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
                      use_max_tokens, max_tokens,
                      images,
-                     is_llama_available=is_llama_available, is_json_response_available=is_json_response_available,
-                     json_content=json_content
-                     )
-        elif platform == 'Gemini':
+                     is_llama_available=False, is_json_response_available=0,
+                     json_content=None
+                     ):
+    try:
+        provider = get_provider_from_model(model)
+        if provider == 'OpenAI':
+            args = get_gpt_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty,
+                                    presence_penalty, stream,
+                                    use_max_tokens, max_tokens,
+                                    images,
+                                    is_llama_available=is_llama_available,
+                                    is_json_response_available=is_json_response_available,
+                                    json_content=json_content
+                                    )
+        elif provider == 'Gemini':
             args = {
                 'model': model,
                 'messages': messages,
                 'stream': stream
             }
             args['messages'].append({"role": "user", "content": cur_text})
-        elif platform == 'Claude':
+        elif provider == 'Claude':
             args = {
                 'model': model,
                 'messages': messages,
                 'max_tokens': 1024,
                 'stream': stream
             }
             args['messages'].append({"role": "user", "content": cur_text})
-        elif platform == 'Llama':
+        elif provider == 'Llama':
             args = {
                 'model': model,
                 'messages': messages,
@@ -182,49 +196,70 @@ def get_argument(model, system, messages, cur_text, temperature, top_p, frequenc
             }
             args['messages'].append({"role": "user", "content": cur_text})
         else:
-            raise Exception(f'Platform not found for model {model}')
+            raise Exception(f'Provider not found for model {model}')
+        return args
+    except Exception as e:
+        print(e)
+        raise e
+
+def get_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
+                 use_max_tokens, max_tokens,
+                 images,
+                 is_llama_available=False, is_json_response_available=0,
+                 json_content=None,
+                 is_g4f=False
+                 ):
+    try:
+        if is_g4f:
+            args = get_g4f_argument(model, messages, cur_text, stream)
+        else:
+            args = get_api_argument(model, system, messages, cur_text, temperature, top_p, frequency_penalty, presence_penalty, stream,
+                                    use_max_tokens, max_tokens,
+                                    images,
+                                    is_llama_available=is_llama_available, is_json_response_available=is_json_response_available,
+                                    json_content=json_content)
         return args
     except Exception as e:
         print(e)
        raise e
 
-def stream_response(platform, response, is_g4f=False):
+def stream_response(provider, response, is_g4f=False):
     if is_g4f:
         for chunk in response:
             yield chunk.choices[0].delta.content
     else:
-        if platform == 'OpenAI':
+        if provider == 'OpenAI':
             for chunk in response:
                 response_text = chunk.choices[0].delta.content
                 yield response_text
-        elif platform == 'Gemini':
+        elif provider == 'Gemini':
             for chunk in response:
                 yield chunk.text
-        elif platform == 'Claude':
+        elif provider == 'Claude':
             with response as stream:
                 for text in stream.text_stream:
                     yield text
-        elif platform == 'Llama':
+        elif provider == 'Llama':
             for chunk in response:
                 response_text = chunk.choices[0].delta.content
                 yield response_text
 
 def get_api_response(args, get_content_only=True):
-    platform = get_platform_from_model(args['model'])
-    if platform == 'OpenAI':
+    provider = get_provider_from_model(args['model'])
+    if provider == 'OpenAI':
         response = OPENAI_CLIENT.chat.completions.create(
             **args
         )
         if args['stream']:
-            return stream_response(platform, response)
+            return stream_response(provider, response)
         else:
             if get_content_only:
                 if args['model'] in O1_MODELS:
                     return str(response.choices[0].message.content)
                 return response.choices[0].message.content
             else:
                 return response
-    elif platform == 'Gemini':
+    elif provider == 'Gemini':
         # Change 'content' to 'parts'
         # Change role's value from 'assistant' to 'model'
         for message in args['messages']:
@@ -238,21 +273,21 @@ def get_api_response(args, get_content_only=True):
 
         if args['stream']:
             response = chat.send_message(args['messages'][-1]['parts'], stream=args['stream'])
-            return stream_response(platform, response)
+            return stream_response(provider, response)
         else:
             response = chat.send_message(args['messages'][-1]['parts'])
             if get_content_only:
                 return response.text
             else:
                 return response
-    elif platform == 'Claude':
+    elif provider == 'Claude':
         if args['stream']:
             response = CLAUDE_CLIENT.messages.stream(
                 model=args['model'],
                 max_tokens=1024,
                 messages=args['messages']
             )
-            return stream_response(platform, response)
+            return stream_response(provider, response)
         else:
             response = CLAUDE_CLIENT.messages.create(
                 model=args['model'],
@@ -263,12 +298,12 @@ def get_api_response(args, get_content_only=True):
                 return response.content[0].text
             else:
                 return response
-    elif platform == 'Llama':
+    elif provider == 'Llama':
         response = LLAMA_CLIENT.chat.completions.create(
             **args
         )
         if args['stream']:
-            return stream_response(platform, response)
+            return stream_response(provider, response)
         else:
             if get_content_only:
                 return response.choices[0].message.content
@@ -282,7 +317,7 @@ def get_g4f_response(args, get_content_only=True):
         messages=args['messages'],
     )
     if args['stream']:
-        return stream_response(platform='', response=response, is_g4f=True)
+        return stream_response(provider='', response=response, is_g4f=True)
     else:
         if get_content_only:
             return response.choices[0].message.content

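Taken together, the new G4F path through globals.py works roughly as below. A sketch, not the app's actual call site: the argument values are placeholders, and on the g4f path the API-only parameters (temperature, penalties, max_tokens, images, and so on) are simply ignored by get_g4f_argument.

```python
from pyqt_openai.globals import get_argument, get_g4f_response

# With is_g4f=True, get_argument() routes to get_g4f_argument(), which keeps
# only model/messages/stream and appends the user turn to messages.
args = get_argument(
    model='gpt-4o-mini', system='', messages=[], cur_text='Hello!',
    temperature=1, top_p=1, frequency_penalty=0, presence_penalty=0,
    stream=True, use_max_tokens=False, max_tokens=-1, images=[],
    is_g4f=True,
)

# get_g4f_response() calls the g4f client; with stream=True it returns the
# generator from stream_response(provider='', response=..., is_g4f=True).
for chunk in get_g4f_response(args):
    print(chunk, end='')
```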