# ikigai-chat / mistral7b.py
from huggingface_hub import InferenceClient
import os
from dotenv import load_dotenv

# Load environment variables (expects HF_TOKEN in a .env file or the environment).
load_dotenv()
API_TOKEN = os.getenv("HF_TOKEN")

# Client for the hosted Inference API endpoint of Mistral-7B-Instruct.
client = InferenceClient(
    "mistralai/Mistral-7B-Instruct-v0.1",
    token=API_TOKEN,
)
def format_prompt(message, history):
    """Build a Mistral-instruct prompt from prior (user, bot) turns plus the new message."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
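
# For illustration, a hypothetical one-turn history produces:
#   format_prompt("How are you?", [("Hi", "Hello there!")])
#   -> "<s>[INST] Hi [/INST] Hello there!</s> [INST] How are you? [/INST]"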
def mistral(
    prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Generate a response from Mistral-7B-Instruct for the given prompt and chat history."""
    # Clamp temperature to a small positive value; the API rejects non-positive values.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )

    # Accumulate streamed tokens into the final response string.
    output = ""
    for response in stream:
        output += response.token.text
    return output
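
# Minimal usage sketch (an assumption for illustration: HF_TOKEN is set and the
# hosted Inference API is reachable; history is a list of (user, bot) tuples).
if __name__ == "__main__":
    history = [("Hi", "Hello! How can I help you today?")]
    print(mistral("Summarize Mistral 7B in one sentence.", history))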