|
from huggingface_hub import Repository
import gradio as gr
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer, GenerationConfig
import torch

model_name = "adi1193/mistral-postv6"

# Clone the adapter repository to a local directory so it can be loaded from disk.
repository = Repository(model_name, clone_from="adi1193/mistral-postv6")
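
# Note (assumption about library versions): Repository is deprecated in recent
# huggingface_hub releases; a minimal forward-compatible sketch would be:
#
#     from huggingface_hub import snapshot_download
#     model_path = snapshot_download(repo_id="adi1193/mistral-postv6")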

# Load the PEFT adapter together with its base model, in fp16 on the GPU.
model_path = repository.local_dir
model = AutoPeftModelForCausalLM.from_pretrained(
    model_path,
    low_cpu_mem_usage=True,
    return_dict=True,
    torch_dtype=torch.float16,
    device_map="cuda",
)
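
# Optional (assumes a LoRA-style adapter): merging the adapter into the base
# weights removes the PEFT indirection overhead at inference time:
#
#     model = model.merge_and_unload()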

tokenizer = AutoTokenizer.from_pretrained(model_path)

# Fallback generation settings. With top_k=1, sampling is effectively greedy
# decoding; the chat handler below overrides these fields with the UI values.
generation_config = GenerationConfig(
    do_sample=True,
    top_k=1,
    temperature=0.1,
    max_new_tokens=100,
    pad_token_id=tokenizer.eos_token_id,
)

# gr.ChatInterface calls fn(message, history, *additional_inputs), so the UI
# control values declared below arrive here as extra positional arguments.
def generate(message, history, temperature, max_new_tokens, top_p, repetition_penalty, hinglish):
    # Assumption: the fine-tune has no dedicated Hinglish control token, so the
    # checkbox is honoured with a plain-text instruction in the prompt.
    prefix = "Reply in Hinglish. " if hinglish else ""
    input_str = "###Human: " + prefix + message + "###Assistant: "
    inputs = tokenizer(input_str, return_tensors="pt").to("cuda")
    # Ad-hoc kwargs override the matching fields of generation_config; top_k is
    # widened so that temperature and top_p actually take effect.
    outputs = model.generate(**inputs, generation_config=generation_config, top_k=50,
                             temperature=max(temperature, 0.01),  # 0.0 errors when do_sample=True
                             max_new_tokens=max_new_tokens, top_p=top_p,
                             repetition_penalty=repetition_penalty)
    # Decode only the newly generated tokens rather than string-replacing the prompt.
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
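
# A sketch (hypothetical helper; assumes tuple-style chat history and that the
# fine-tune saw multi-turn data) for folding prior turns into the prompt, since
# generate() above ignores `history`:
#
#     def build_prompt(message, history):
#         past = "".join(f"###Human: {u}###Assistant: {a}" for u, a in history)
#         return past + "###Human: " + message + "###Assistant: "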
|
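# UI controls for gr.ChatInterface; their values are passed to generate()
# positionally, in the order declared here, after (message, history).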
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens to generate",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
    gr.Checkbox(
        label="Hinglish",
        value=False,
        interactive=True,
        info="Lets MistralTalk respond in Hinglish (a mix of Hindi and English)",
    ),
]

css = """
#mkd {
    height: 500px;
    overflow: auto;
    border: 1px solid #ccc;
}
"""

with gr.Blocks(css=css) as demo:
    gr.HTML("<h1><center>MistralTalk🗣️</center></h1>")
    gr.HTML("<h3><center>In this demo, you can chat with the <a href='https://huggingface.co/adi1193/mistral-postv6'>adi1193/mistral-postv6</a> model. 💬</center></h3>")
    gr.HTML("<h3><center>Learn more about the Mistral architecture <a href='https://huggingface.co/docs/transformers/main/model_doc/mistral'>here</a>. 📚</center></h3>")
    gr.ChatInterface(
        generate,
        additional_inputs=additional_inputs,
        theme=gr.themes.Soft(),
        examples=[
            ["What is the interest?"],
            ["How does the universe work?"],
            ["What can you do?"],
            ["What is quantum mechanics?"],
            ["Do you believe in an afterlife?"],
        ],
    )
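
# A sketch for shared or GPU deployments (assumes default queue settings):
# enabling the request queue serializes calls so concurrent users don't
# contend for the GPU:
#
#     demo.queue().launch()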

if __name__ == "__main__":
    demo.launch()