:gem: [Feature] New model supported: command-r-plus (CohereForAI/c4ai-command-r-plus)

Changed files:
- constants/models.py (+11 -9)
- messagers/message_composer.py (+8 -3)

constants/models.py
@@ -4,9 +4,11 @@ MODEL_MAP = {
     "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",
     "openchat-3.5": "openchat/openchat-3.5-0106",
     "gemma-7b": "google/gemma-7b-it",
+    "command-r-plus": "CohereForAI/c4ai-command-r-plus",
     "default": "mistralai/Mixtral-8x7B-Instruct-v0.1",
 }
 
+AVAILABLE_MODELS = list(MODEL_MAP.keys())
 
 STOP_SEQUENCES_MAP = {
     "mixtral-8x7b": "</s>",
@@ -14,6 +16,7 @@ STOP_SEQUENCES_MAP = {
     "mistral-7b": "</s>",
     "openchat-3.5": "<|end_of_turn|>",
     "gemma-7b": "<eos>",
+    "command-r-plus": "<|END_OF_TURN_TOKEN|>",
 }
 
 TOKEN_LIMIT_MAP = {
@@ -23,20 +26,12 @@ TOKEN_LIMIT_MAP = {
     "openchat-3.5": 8192,
     "gemma-7b": 8192,
     "gpt-3.5-turbo": 8192,
+    "command-r-plus": 32768,
 }
 
 TOKEN_RESERVED = 20
 
 
-AVAILABLE_MODELS = [
-    "mixtral-8x7b",
-    "nous-mixtral-8x7b",
-    "mistral-7b",
-    "openchat-3.5",
-    "gemma-7b",
-    "gpt-3.5-turbo",
-]
-
 # https://platform.openai.com/docs/api-reference/models/list
 AVAILABLE_MODELS_DICTS = [
     {
@@ -74,6 +69,13 @@ AVAILABLE_MODELS_DICTS = [
         "created": 1700000000,
         "owned_by": "Google",
     },
+    {
+        "id": "command-r-plus",
+        "description": "[CohereForAI/c4ai-command-r-plus]: https://huggingface.co/CohereForAI/c4ai-command-r-plus",
+        "object": "model",
+        "created": 1700000000,
+        "owned_by": "CohereForAI",
+    },
     {
         "id": "gpt-3.5-turbo",
         "description": "[openai/gpt-3.5-turbo]: https://platform.openai.com/docs/models/gpt-3-5-turbo",
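
A side effect worth calling out: AVAILABLE_MODELS is no longer a hand-maintained list that must be edited in lockstep with MODEL_MAP; it is now derived from the registry's keys, so a model added once to MODEL_MAP is exposed automatically. A minimal standalone sketch of the effect (names mirror constants/models.py, trimmed to the entries visible in this diff):

# Sketch: derive the public model list from the registry so the two
# can never drift apart.
MODEL_MAP = {
    "gemma-7b": "google/gemma-7b-it",
    "command-r-plus": "CohereForAI/c4ai-command-r-plus",
    "default": "mistralai/Mixtral-8x7B-Instruct-v0.1",
}

AVAILABLE_MODELS = list(MODEL_MAP.keys())

print(AVAILABLE_MODELS)
# ['gemma-7b', 'command-r-plus', 'default']

Note the derived list now also contains the "default" alias, which the deleted hardcoded list did not, and it only contains models that actually have a MODEL_MAP entry (e.g. gpt-3.5-turbo stays listed only if it is mapped there).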

messagers/message_composer.py
@@ -151,11 +151,15 @@ class MessageComposer:
         # https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO#prompt-format
         # https://huggingface.co/openchat/openchat-3.5-0106
         # elif self.model in ["openchat-3.5", "nous-mixtral-8x7b"]:
-        else:
-            tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
+        elif self.model in ["openchat-3.5", "command-r-plus"]:
+            tokenizer = AutoTokenizer.from_pretrained(self.model_fullname)
             self.merged_str = tokenizer.apply_chat_template(
                 messages, tokenize=False, add_generation_prompt=True
             )
+        else:
+            self.merged_str = "\n\n".join(
+                [f"{message['role']}: {message['content']}" for message in messages]
+            )
 
         return self.merged_str
 
@@ -164,7 +168,8 @@ if __name__ == "__main__":
     # model = "mixtral-8x7b"
     # model = "nous-mixtral-8x7b"
     # model = "gemma-7b"
-    model = "openchat-3.5"
+    # model = "openchat-3.5"
+    model = "command-r-plus"
     composer = MessageComposer(model)
     messages = [
         {
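
In the new elif branch, the prompt format for command-r-plus is delegated to the chat template that ships with the model's tokenizer rather than being hand-rolled. A minimal standalone sketch of that call, with illustrative messages (not the Space's own code; the CohereForAI/c4ai-command-r-plus repo may be gated on the Hub, so from_pretrained assumes access has been granted):

from transformers import AutoTokenizer

# Fetches the tokenizer, including its bundled chat template, from the Hub.
tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-plus")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello, who are you?"},
]

# Render the conversation to a prompt string and append the tokens that
# cue the assistant's next turn (the same call the elif branch makes).
merged_str = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(merged_str)
# The rendered prompt ends turns with Cohere's <|END_OF_TURN_TOKEN|>,
# matching the stop sequence registered in constants/models.py.

Also note the behavioral change in the fallback: the tokenizer-template path used to be the catch-all else, but is now restricted to "openchat-3.5" and "command-r-plus"; any other model that reaches the end of the chain gets the plain "role: content" join instead.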
|