# mixtral-search/middlewares/chat_client.py
from huggingface_hub import InferenceClient
import os
from dotenv import load_dotenv

# Load environment variables (expects HF_TOKEN in the environment or a .env file).
load_dotenv()
API_TOKEN = os.getenv("HF_TOKEN")


def format_prompt(message, history):
    """Build a Mistral-instruct style prompt from the chat history.

    Each prior (user, assistant) turn is wrapped in [INST] ... [/INST] tags
    followed by the assistant response and a closing </s>, and the new
    message is appended as the final [INST] block.
    """
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
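
# Illustrative trace (the inputs below are hypothetical): with one prior turn,
#   format_prompt("How are you?", [("Hi", "Hello!")])
# returns
#   "<s>[INST] Hi [/INST] Hello!</s> [INST] How are you? [/INST]"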


def chat(
    prompt,
    history,
    chat_client="mistralai/Mistral-7B-Instruct-v0.1",
    temperature=0.9,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.0,
    truncate=False,  # accepted for signature compatibility; currently unused
):
    """Stream a chat completion from the Hugging Face Inference API."""
    client = InferenceClient(chat_client, token=API_TOKEN)

    # Clamp temperature to a small positive value; a temperature of 0 is
    # rejected when do_sample=True.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    # Return a generator of token-level events rather than waiting for the
    # full completion (details=True exposes per-token metadata).
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    return stream
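

# Minimal usage sketch (assumptions: HF_TOKEN is set, and the history shown
# here is made up for illustration). With stream=True and details=True the
# client yields token-level events whose text is available as
# `event.token.text`.
if __name__ == "__main__":
    history = [("Hello!", "Hi there, how can I help?")]
    for event in chat("Summarize our conversation.", history):
        # Skip special tokens such as the closing </s>.
        if not event.token.special:
            print(event.token.text, end="", flush=True)
    print()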