Spaces: Running
Husnain committed • 56aa981
Parent(s): 827c283
⚡ [Enhance] Use nous-mixtral-8x7b as default model
constants/models.py CHANGED (+29 -11)
@@ -6,16 +6,19 @@ MODEL_MAP = {
     "gemma-7b": "google/gemma-1.1-7b-it",
     "command-r-plus": "CohereForAI/c4ai-command-r-plus",
     "llama3-70b": "meta-llama/Meta-Llama-3-70B-Instruct",
-    "
+    "zephyr-141b": "HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+    "default": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
 }

 AVAILABLE_MODELS = list(MODEL_MAP.keys())

+PRO_MODELS = ["command-r-plus", "llama3-70b", "zephyr-141b"]
+
 STOP_SEQUENCES_MAP = {
     "mixtral-8x7b": "</s>",
     "nous-mixtral-8x7b": "<|im_end|>",
     "mistral-7b": "</s>",
-    "openchat-3.5": "<|end_of_turn|>",
+    # "openchat-3.5": "<|end_of_turn|>",
     "gemma-7b": "<eos>",
     "command-r-plus": "<|END_OF_TURN_TOKEN|>",
 }
@@ -24,11 +27,12 @@ TOKEN_LIMIT_MAP = {
     "mixtral-8x7b": 32768,
     "nous-mixtral-8x7b": 32768,
     "mistral-7b": 32768,
-    "openchat-3.5": 8192,
+    # "openchat-3.5": 8192,
     "gemma-7b": 8192,
-    "gpt-3.5-turbo": 8192,
     "command-r-plus": 32768,
     "llama3-70b": 8192,
+    "zephyr-141b": 2048,
+    "gpt-3.5-turbo": 8192,
 }

 TOKEN_RESERVED = 20
@@ -71,13 +75,27 @@ AVAILABLE_MODELS_DICTS = [
         "created": 1700000000,
         "owned_by": "Google",
     },
-
-
-
-
-
-
-
+    {
+        "id": "command-r-plus",
+        "description": "[CohereForAI/c4ai-command-r-plus]: https://huggingface.co/CohereForAI/c4ai-command-r-plus",
+        "object": "model",
+        "created": 1700000000,
+        "owned_by": "CohereForAI",
+    },
+    {
+        "id": "llama3-70b",
+        "description": "[meta-llama/Meta-Llama-3-70B]: https://huggingface.co/meta-llama/Meta-Llama-3-70B",
+        "object": "model",
+        "created": 1700000000,
+        "owned_by": "Meta",
+    },
+    {
+        "id": "zephyr-141b",
+        "description": "[HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1]: https://huggingface.co/HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1",
+        "object": "model",
+        "created": 1700000000,
+        "owned_by": "Huggingface",
+    },
     {
         "id": "gpt-3.5-turbo",
         "description": "[openai/gpt-3.5-turbo]: https://platform.openai.com/docs/models/gpt-3-5-turbo",