Synced repo using 'sync_with_huggingface' Github Action
- client/html/index.html +2 -2
- g4f/models.py +2 -6
- server/backend.py +0 -1
client/html/index.html

@@ -93,9 +93,9 @@
         <div class="buttons">
             <div class="field">
                 <select class="dropdown" name="model" id="model">
-                    <option value="gpt-3.5-turbo"
+                    <option value="gpt-3.5-turbo">GPT-3.5</option>
                     <option value="gpt-3.5-turbo-16k">GPT-3.5-turbo-16k</option>
-                    <option value="gpt-4">GPT-4</option>
+                    <option value="gpt-4" selected>GPT-4</option>
                 </select>
             </div>
             <div class="field">
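For context, the value of the option marked `selected` is what the chat UI submits as the model field, so GPT-4 becomes the default after this change. A quick hedged sketch of the request the frontend presumably sends; the endpoint path and payload shape are illustrative assumptions, not taken from this commit:

# Hypothetical request mirroring what the web UI might send once GPT-4 is the
# preselected option; the URL and field names are illustrative assumptions.
import requests

BACKEND_URL = "http://localhost:1338/backend-api/v2/conversation"  # assumed route

payload = {
    "model": "gpt-4",  # value of the <select id="model"> option marked `selected`
    "messages": [{"role": "user", "content": "Hello"}],
}
print(requests.post(BACKEND_URL, json=payload).status_code)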
g4f/models.py

@@ -10,7 +10,7 @@ class Model:
     class gpt_35_turbo:
         name: str = 'gpt-3.5-turbo'
         base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.
+        best_provider: Provider.Provider = Provider.AiService

     class gpt_35_turbo_0613:
         name: str = 'gpt-3.5-turbo-0613'
@@ -30,7 +30,7 @@ class Model:
     class gpt_35_turbo_16k:
         name: str = 'gpt-3.5-turbo-16k'
         base_provider: str = 'openai'
-        best_provider: Provider.Provider = Provider.
+        best_provider: Provider.Provider = Provider.ChatFree

     class gpt_4_dev:
         name: str = 'gpt-4-for-dev'
@@ -163,10 +163,6 @@ class Model:
         base_provider: str = 'google'
         best_provider: Provider.Provider = Provider.Bard

-        """ 'falcon-40b': Model.falcon_40b,
-        'falcon-7b': Model.falcon_7b,
-        'llama-13b': Model.llama_13b,"""
-
     class falcon_40b:
         name: str = 'falcon-40b'
         base_provider: str = 'huggingface'
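This change fills in the previously incomplete best_provider defaults (AiService for gpt-3.5-turbo, ChatFree for gpt-3.5-turbo-16k) and removes a leftover commented-out fragment of the model map. As a rough illustration of how a best_provider attribute like this is typically consumed, assuming the class layout shown in the diff; the resolve_provider helper below is hypothetical, not part of this repository:

# Hypothetical helper showing how a model's best_provider might serve as the
# fallback when the caller does not request a specific provider.
from g4f import Provider, models


def resolve_provider(model, provider=None):
    # Prefer an explicitly requested provider; otherwise fall back to the
    # default recorded on the model class (e.g. Provider.AiService for
    # gpt-3.5-turbo after this commit).
    return provider or model.best_provider


print(resolve_provider(models.Model.gpt_35_turbo))  # -> Provider.AiService (layout assumed from the diff)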
server/backend.py

@@ -37,7 +37,6 @@ class Backend_Api:
             # Generate response
             response = ChatCompletion.create(
                 model=model,
-                stream=True,
                 chatId=conversation_id,
                 messages=messages
             )
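With stream=True removed, ChatCompletion.create presumably returns the complete completion as a single value instead of a generator of chunks, so the backend receives the whole response in one blocking call. A minimal sketch contrasting the two call styles, assuming the bundled g4f follows the usual g4f convention; the chatId keyword matches the code shown above:

# Sketch of the two call styles; streaming behaviour is assumed, not verified here.
from g4f import ChatCompletion

messages = [{"role": "user", "content": "Hello"}]

# Streaming (the old behaviour removed by this commit): iterate over chunks.
# for chunk in ChatCompletion.create(model="gpt-4", chatId="demo", messages=messages, stream=True):
#     print(chunk, end="", flush=True)

# Non-streaming (the behaviour after this commit): one blocking call, full text back.
full_text = ChatCompletion.create(model="gpt-4", chatId="demo", messages=messages)
print(full_text)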