Yarik committed
Commit 58a3ca9
Parent(s): 7abb8c4

Update space
Files changed:
- apis/chat_api.py +7 -0
- messagers/message_composer.py +37 -0
- networks/message_streamer.py +5 -0
apis/chat_api.py
CHANGED
@@ -61,6 +61,13 @@ class ChatAPIApp:
                     "created": 1700000000,
                     "owned_by": "NousResearch",
                 },
+                {
+                    "id": "falcon-40b-instruct-GPTQ-inference-endpoints",
+                    "description": "[philschmid/falcon-40b-instruct-GPTQ-inference-endpoints]: https://huggingface.co/philschmid/falcon-40b-instruct-GPTQ-inference-endpoints",
+                    "object": "model",
+                    "created": 1700000000,
+                    "owned_by": "philschmid",
+                }
             ],
         }
         return self.available_models
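For reference, the added entry is a plain OpenAI-style model object in the list returned by get_available_models. The sketch below is illustrative only; the field values come from the diff, but the surrounding snippet and variable names are not part of the source.

# Minimal standalone sketch (assumption: the response shape mirrors an
# OpenAI-style model list, as the diff above suggests).
available_models = {
    "object": "list",
    "data": [
        {
            "id": "falcon-40b-instruct-GPTQ-inference-endpoints",
            "description": "[philschmid/falcon-40b-instruct-GPTQ-inference-endpoints]: https://huggingface.co/philschmid/falcon-40b-instruct-GPTQ-inference-endpoints",
            "object": "model",
            "created": 1700000000,
            "owned_by": "philschmid",
        },
    ],
}

# A client listing models would now see the new id alongside the existing ones.
print([m["id"] for m in available_models["data"]])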
messagers/message_composer.py
CHANGED
@@ -9,6 +9,7 @@ class MessageComposer:
         "mixtral-8x7b",
         "mistral-7b",
         "openchat-3.5",
+        "falcon-40b-instruct-GPTQ-inference-endpoints",
         "nous-mixtral-8x7b",
     ]
 
@@ -116,6 +117,27 @@ class MessageComposer:
                     )
             self.merged_str_list.append(f"GPT4 Correct Assistant:\n")
             self.merged_str = "\n".join(self.merged_str_list)
+        elif self.model in ["falcon-40b-instruct-GPTQ-inference-endpoints"]:
+            self.messages = self.concat_messages_by_role(messages)
+            self.merged_str_list = []
+            self.end_of_turn = "<|end_of_turn|>"
+            for message in self.messages:
+                role = message["role"]
+                content = message["content"]
+                if role in self.inst_roles:
+                    self.merged_str_list.append(
+                        f"GPT4 Correct User:\n{content}{self.end_of_turn}"
+                    )
+                elif role in self.answer_roles:
+                    self.merged_str_list.append(
+                        f"GPT4 Correct Assistant:\n{content}{self.end_of_turn}"
+                    )
+                else:
+                    self.merged_str_list.append(
+                        f"GPT4 Correct User: {content}{self.end_of_turn}"
+                    )
+            self.merged_str_list.append(f"GPT4 Correct Assistant:\n")
+            self.merged_str = "\n".join(self.merged_str_list)
         else:
             self.merged_str = "\n".join(
                 [
 
@@ -206,6 +228,21 @@ class MessageComposer:
             self.append_last_instruction_to_messages(
                 inst_matches_list, pair_matches_list
             )
+        elif self.model in ["falcon-40b-instruct-GPTQ-inference-endpoints"]:
+            pair_pattern = r"GPT4 Correct User:(?P<inst>[\s\S]*?)<\|end_of_turn\|>\s*GPT4 Correct Assistant:(?P<answer>[\s\S]*?)<\|end_of_turn\|>"
+            pair_matches = re.finditer(
+                pair_pattern, self.merged_str, flags=re.MULTILINE | re.IGNORECASE
+            )
+            pair_matches_list = list(pair_matches)
+            self.messages = self.convert_pair_matches_to_messages(pair_matches_list)
+            inst_pattern = r"GPT4 Correct User:(?P<inst>[\s\S]*?)<\|end_of_turn\|>"
+            inst_matches = re.finditer(
+                inst_pattern, self.merged_str, flags=re.MULTILINE | re.IGNORECASE
+            )
+            inst_matches_list = list(inst_matches)
+            self.append_last_instruction_to_messages(
+                inst_matches_list, pair_matches_list
+            )
         else:
             self.messages = [
                 {
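The new branches reuse the "GPT4 Correct User / GPT4 Correct Assistant" template with <|end_of_turn|> as the turn separator, both when merging messages into a prompt and when splitting a prompt back into messages. The snippet below is a standalone sketch of that round trip under those assumptions; it does not use the MessageComposer class itself, and the sample messages are invented for illustration.

import re

end_of_turn = "<|end_of_turn|>"
messages = [
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi, how can I help?"},
    {"role": "user", "content": "Tell me a joke."},
]

# merge: one "GPT4 Correct ..." block per message, then an open assistant turn
merged_str_list = []
for message in messages:
    role, content = message["role"], message["content"]
    prefix = "GPT4 Correct Assistant" if role == "assistant" else "GPT4 Correct User"
    merged_str_list.append(f"{prefix}:\n{content}{end_of_turn}")
merged_str_list.append("GPT4 Correct Assistant:\n")
merged_str = "\n".join(merged_str_list)

# split: recover completed user/assistant pairs with the same regex as the diff
pair_pattern = (
    r"GPT4 Correct User:(?P<inst>[\s\S]*?)<\|end_of_turn\|>\s*"
    r"GPT4 Correct Assistant:(?P<answer>[\s\S]*?)<\|end_of_turn\|>"
)
pairs = list(re.finditer(pair_pattern, merged_str, flags=re.MULTILINE | re.IGNORECASE))
print(len(pairs))  # 1 completed pair; the trailing user turn is caught by inst_pattern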
networks/message_streamer.py
CHANGED
@@ -13,6 +13,7 @@ class MessageStreamer:
         "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",  # 65.71, fast
         "nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
         "openchat-3.5": "openchat/openchat-3.5-0106",  # 68.89, fast
+        "falcon-40b-instruct-GPTQ-inference-endpoints": "philschmid/falcon-40b-instruct-GPTQ-inference-endpoints",
         # "zephyr-7b-beta": "HuggingFaceH4/zephyr-7b-beta",  # ❌ Too Slow
         # "llama-70b": "meta-llama/Llama-2-70b-chat-hf",  # ❌ Require Pro User
         # "codellama-34b": "codellama/CodeLlama-34b-Instruct-hf",  # ❌ Low Score
@@ -24,12 +25,16 @@ class MessageStreamer:
         "mistral-7b": "</s>",
         "nous-mixtral-8x7b": "<|im_end|>",
         "openchat-3.5": "<|end_of_turn|>",
+        "falcon-40b-instruct-GPTQ-inference-endpoints": "<|end_of_turn|>",
+
     }
     TOKEN_LIMIT_MAP = {
         "mixtral-8x7b": 32768,
         "mistral-7b": 32768,
         "nous-mixtral-8x7b": 32768,
         "openchat-3.5": 8192,
+        "falcon-40b-instruct-GPTQ-inference-endpoints": 8192,
+
     }
     TOKEN_RESERVED = 100
 
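Together these hunks register the new model's repo id, stop sequence, and token limit. The sketch below shows how such mappings are typically consulted when assembling a generation request; only TOKEN_LIMIT_MAP, TOKEN_RESERVED, and the map contents appear in the diff, while the other names (MODEL_MAP, STOP_SEQUENCES_MAP, build_request_params) are assumptions made for illustration.

# Standalone sketch, not the project's API.
MODEL_MAP = {
    "falcon-40b-instruct-GPTQ-inference-endpoints": "philschmid/falcon-40b-instruct-GPTQ-inference-endpoints",
}
STOP_SEQUENCES_MAP = {
    "falcon-40b-instruct-GPTQ-inference-endpoints": "<|end_of_turn|>",
}
TOKEN_LIMIT_MAP = {
    "falcon-40b-instruct-GPTQ-inference-endpoints": 8192,
}
TOKEN_RESERVED = 100


def build_request_params(model: str, prompt_token_count: int) -> dict:
    """Resolve repo id, stop sequence, and a max_new_tokens budget for `model`."""
    max_new_tokens = TOKEN_LIMIT_MAP[model] - TOKEN_RESERVED - prompt_token_count
    return {
        "repo_id": MODEL_MAP[model],
        "stop": STOP_SEQUENCES_MAP[model],
        "max_new_tokens": max(max_new_tokens, 1),
    }


print(build_request_params("falcon-40b-instruct-GPTQ-inference-endpoints", 1500))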