import json
import re

import requests
from tiktoken import get_encoding as tiktoken_get_encoding

from messagers.message_outputer import OpenaiStreamOutputer
from utils.logger import logger
from utils.enver import enver


class MessageStreamer:
    MODEL_MAP = {
        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",  # 72.62, fast [Recommended]
        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",  # 65.71, fast
        "nous-mixtral-8x7b": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
        "gemma-7b": "google/gemma-7b-it",
        "codellama-7b": "codellama/CodeLlama-7b-hf",  # ❌ Low Score
        "default": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    }
    STOP_SEQUENCES_MAP = {
        "mixtral-8x7b": "</s>",
        "mistral-7b": "</s>",
        "nous-mixtral-8x7b": "<|im_end|>",
        "openchat-3.5": "<|end_of_turn|>",
        "codellama-7b": "",
        "gemma-7b": "<eos>",
        "default": "</s>",  # same as mixtral-8x7b, which "default" aliases
    }
    TOKEN_LIMIT_MAP = {
        "mixtral-8x7b": 32768,
        "mistral-7b": 32768,
        "nous-mixtral-8x7b": 32768,
        "openchat-3.5": 8192,
        "codellama-7b": 8192,
        "gemma-7b": 8192,
        "default": 32768,  # same as mixtral-8x7b, which "default" aliases
    }
    # Headroom reserved for the response when computing the token budget
    TOKEN_RESERVED = 100

    def __init__(self, model: str):
        if model in self.MODEL_MAP:
            self.model = model
        else:
            self.model = "default"
        self.model_fullname = self.MODEL_MAP[self.model]
        self.message_outputer = OpenaiStreamOutputer()
        self.tokenizer = tiktoken_get_encoding("cl100k_base")

    def parse_line(self, line):
        line = line.decode("utf-8")
        line = re.sub(r"data:\s*", "", line)
        data = json.loads(line)
        try:
            content = data["token"]["text"]
        except KeyError:
            # Malformed chunk without a token payload: log it and emit nothing
            content = ""
            logger.err(data)
        return content
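
    # Each streamed line consumed by parse_line() looks roughly like this
    # (illustrative sketch of the text-generation-inference SSE format; only
    # the `token.text` field is relied upon here):
    #   data: {"token": {"id": 42, "text": " Hello", "logprob": -0.1, "special": false}}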

    def count_tokens(self, text):
        tokens = self.tokenizer.encode(text)
        token_count = len(tokens)
        logger.note(f"Prompt Token Count: {token_count}")
        return token_count

    def chat_response(
        self,
        prompt: str = None,
        temperature: float = 0.5,
        top_p: float = 0.95,
        max_new_tokens: int = None,
        api_key: str = None,
        use_cache: bool = False,
    ):
        # https://huggingface.co/docs/api-inference/detailed_parameters?code=curl
        # curl --proxy http://<server>:<port> https://api-inference.huggingface.co/models/<org>/<model_name> -X POST -d '{"inputs":"who are you?","parameters":{"max_new_tokens":64}}' -H 'Content-Type: application/json' -H 'Authorization: Bearer <HF_TOKEN>'
        self.request_url = (
            f"https://api-inference.huggingface.co/models/{self.model_fullname}"
        )
        self.request_headers = {
            "Content-Type": "application/json",
        }

        if api_key:
            logger.note(
                f"Using API Key: {api_key[:3]}{(len(api_key)-7)*'*'}{api_key[-4:]}"
            )
            self.request_headers["Authorization"] = f"Bearer {api_key}"

        if temperature is None or temperature < 0:
            temperature = 0.0
        # temperature must be in the open interval (0, 1) for HF LLM models
        temperature = max(temperature, 0.01)
        temperature = min(temperature, 0.99)
        top_p = max(top_p, 0.01)
        top_p = min(top_p, 0.99)

        # Token budget: model context limit, minus reserved headroom, minus
        # the prompt length; the 1.35 factor pads the tiktoken estimate, since
        # cl100k_base is not the model's own tokenizer
        token_limit = int(
            self.TOKEN_LIMIT_MAP[self.model]
            - self.TOKEN_RESERVED
            - self.count_tokens(prompt) * 1.35
        )
        if token_limit <= 0:
            raise ValueError("Prompt exceeded token limit!")

        if max_new_tokens is None or max_new_tokens <= 0:
            max_new_tokens = token_limit
        else:
            max_new_tokens = min(max_new_tokens, token_limit)
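
        # e.g. for mixtral-8x7b with a prompt that tiktoken counts as 1000
        # tokens (illustrative arithmetic):
        #   token_limit = 32768 - 100 - int(1000 * 1.35) = 31318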

        # References:
        #   huggingface_hub/inference/_client.py:
        #     class InferenceClient > def text_generation()
        #   huggingface_hub/inference/_text_generation.py:
        #     class TextGenerationRequest > param `stream`
        # https://huggingface.co/docs/text-generation-inference/conceptual/streaming#streaming-with-curl
        # https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task
        self.request_body = {
            "inputs": prompt,
            "parameters": {
                "temperature": temperature,
                "top_p": top_p,
                "max_new_tokens": max_new_tokens,
                "return_full_text": False,
            },
            "options": {
                "use_cache": use_cache,
            },
            "stream": True,
        }

        self.stop_sequences = self.STOP_SEQUENCES_MAP.get(self.model, "")
        # self.request_body["parameters"]["stop_sequences"] = [
        #     self.STOP_SEQUENCES_MAP[self.model]
        # ]

        logger.back(self.request_url)
        enver.set_envs(proxies=True)
        stream_response = requests.post(
            self.request_url,
            headers=self.request_headers,
            json=self.request_body,
            proxies=enver.requests_proxies,
            stream=True,
        )
        status_code = stream_response.status_code
        if status_code == 200:
            logger.success(status_code)
        else:
            logger.err(status_code)

        return stream_response

    def chat_return_dict(self, stream_response):
        # https://platform.openai.com/docs/guides/text-generation/chat-completions-response-format
        final_output = self.message_outputer.default_data.copy()
        final_output["choices"] = [
            {
                "index": 0,
                "finish_reason": "stop",
                "message": {
                    "role": "assistant",
                    "content": "",
                },
            }
        ]
        logger.back(final_output)

        final_content = ""
        for line in stream_response.iter_lines():
            if not line:
                continue
            content = self.parse_line(line)

            if self.stop_sequences and content.strip() == self.stop_sequences:
                logger.success("\n[Finished]")
                break
            else:
                logger.back(content, end="")
                final_content += content

        if self.stop_sequences:
            final_content = final_content.replace(self.stop_sequences, "")
        final_content = final_content.strip()
        final_output["choices"][0]["message"]["content"] = final_content

        return final_output

    def chat_return_generator(self, stream_response):
        is_finished = False
        line_count = 0
        for line in stream_response.iter_lines():
            if not line:
                continue
            line_count += 1

            content = self.parse_line(line)

            if self.stop_sequences and content.strip() == self.stop_sequences:
                content_type = "Finished"
                logger.success("\n[Finished]")
                is_finished = True
            else:
                content_type = "Completions"
                if line_count == 1:
                    content = content.lstrip()
                logger.back(content, end="")

            output = self.message_outputer.output(
                content=content, content_type=content_type
            )
            yield output

        if not is_finished:
            yield self.message_outputer.output(content="", content_type="Finished")
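

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only; assumes a valid HF token in the
    # HF_TOKEN environment variable and that the Serverless Inference API is
    # reachable from this machine):
    import os

    streamer = MessageStreamer(model="mixtral-8x7b")
    stream_response = streamer.chat_response(
        prompt="Hello, who are you?",
        temperature=0.5,
        api_key=os.environ.get("HF_TOKEN"),
    )
    for chunk in streamer.chat_return_generator(stream_response):
        logger.back(chunk)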