Synced repo using 'sync_with_huggingface' Github Action
- client/html/index.html +2 -1
- g4f/Provider/Providers/Chimera.py +1 -0
- g4f/models.py +6 -7
client/html/index.html
CHANGED
```diff
@@ -117,7 +117,8 @@
                         <option value="gpt-4-32k">GPT-4-32k</option>
                     </optgroup>
                     <optgroup label="LLAMA">
-                        <option value="
+                        <option value="oasst-sft-6-llama-30b">LLaMa-30B-sft-6</option>
+                        <option value="llama-2-70b-chat">LLaMa-2-70B-chat</option>
                     </optgroup>
                     <optgroup label="{{_('IMAGE')}}">
                         <option value="" disabled>Kandinsky (soon)</option>
```
g4f/Provider/Providers/Chimera.py
CHANGED
```diff
@@ -18,6 +18,7 @@ model = [
     'gpt-4-0314',
     'gpt-4-32k',
     'llama-2-70b-chat',
+    'oasst-sft-6-llama-30b'
 ]
 supports_stream = True
 needs_auth = False
```
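For context, a minimal sketch of how the newly listed model could be requested through the repo's vendored g4f package. This is not part of the commit; it assumes the usual `g4f.ChatCompletion.create(...)` entry point, which resolves the model name via g4f/models.py and routes it to its `best_provider` (Chimera here).

```python
# Hedged sketch, not part of this commit: requesting the newly listed
# 'oasst-sft-6-llama-30b' model through the vendored g4f package.
# Assumes the standard g4f.ChatCompletion.create(...) interface.
import g4f

response = g4f.ChatCompletion.create(
    model='oasst-sft-6-llama-30b',
    messages=[{'role': 'user', 'content': 'Say hello.'}],
    stream=False,  # Chimera declares supports_stream = True, so streaming is also an option
)
print(response)
```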
g4f/models.py
CHANGED
```diff
@@ -72,11 +72,6 @@ class Model:
         base_provider: str = 'anthropic'
         best_provider: Provider.Provider = Provider.Chimera
 
-    class sage:
-        name: str = 'sage'
-        base_provider: str = 'poe'
-        best_provider: Provider.Provider = Provider.Chimera
-
     class llama_2_7b_chat:
         name: str = 'llama-2-7b-chat'
         base_provider: str = 'llama'
@@ -91,6 +86,11 @@ class Model:
         name: str = 'llama-2-70b-chat'
         base_provider: str = 'llama'
         best_provider: Provider.Provider = Provider.Chimera
+
+    class oasst_sft_6_llama_30b:
+        name: str = 'oasst-sft-6-llama-30b'
+        base_provider: str = 'huggingface-chat'
+        best_provider: Provider.Provider = Provider.Chimera
 
 
 
@@ -114,6 +114,5 @@ class ModelUtils:
         'llama-2-7b-chat': Model.llama_2_7b_chat,
         'llama-2-13b-chat': Model.llama_2_13b_chat,
         'llama-2-70b-chat': Model.llama_2_70b_chat,
-
-        'sage': Model.sage,
+        'oasst-sft-6-llama-30b': Model.oasst_sft_6_llama_30b,
     }
```
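A small sketch of how the new registry entry resolves after this change. It assumes ModelUtils keeps its name-to-class mapping in a dict named `convert`, as in upstream g4f; the attribute name itself is not visible in this hunk.

```python
# Hedged sketch: looking up the entry added to ModelUtils in this commit.
# The mapping attribute is assumed to be named `convert` (upstream g4f naming);
# only the dict entries are visible in the diff above.
from g4f import models

entry = models.ModelUtils.convert['oasst-sft-6-llama-30b']
print(entry.name)           # 'oasst-sft-6-llama-30b'
print(entry.base_provider)  # 'huggingface-chat'
print(entry.best_provider)  # Provider.Chimera, the provider module used for this model
```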