|
import os

import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
# Hugging Face access token and base model id. The token is read from the
# environment here (an assumption; substitute your own credential handling).
api_token = os.environ.get("HF_TOKEN")
base_model_id = "mistralai/Mistral-7B-v0.1"

# 4-bit quantization config for loading the base model (assumed settings;
# adjust to your hardware).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
    use_auth_token=api_token,
)
|
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = 'left'

# Attach the fine-tuned PEFT adapter to the quantized base model.
model = PeftModel.from_pretrained(base_model, "Tonic/mistralmed")
|
class ChatBot:
    def __init__(self):
        self.history = []

    def predict(self, user_input):
        # Encode the new user message, terminated with the EOS token.
        new_user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")

        # Prepend the stored conversation tokens, if any, to the new message.
        if self.history:
            flat_history = [token_id for turn in self.history for token_id in turn]
            flat_history_tensor = torch.tensor(flat_history).unsqueeze(dim=0)
            bot_input_ids = torch.cat([flat_history_tensor, new_user_input_ids], dim=-1)
        else:
            bot_input_ids = new_user_input_ids

        # Generate a continuation and keep only the newly produced tokens.
        chat_history_ids = model.generate(bot_input_ids, max_length=2000, pad_token_id=tokenizer.eos_token_id)
        new_tokens = chat_history_ids[:, bot_input_ids.shape[-1]:]

        # Store the generated tokens so later turns can reuse them as context.
        self.history.append(new_tokens.tolist()[0])

        response = tokenizer.decode(new_tokens[0], skip_special_tokens=True)
        return response
|
bot = ChatBot()
|
title = "👋🏻Welcome to Tonic's EZ Chat🚀"
description = "You can use this Space to test out the current model (MistralMed) or duplicate this Space and use it with any other model on 🤗HuggingFace. Join me on [Discord](https://discord.gg/fpEPNZGsbt) to build together."
examples = [["What is the boiling point of nitrogen?"]]
|
iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
    inputs="text",
    outputs="text",
    theme="ParityError/Anime",
)

iface.launch()
|
|