Update README.md
README.md
CHANGED
@@ -66,6 +66,81 @@ Use the code below to get started with the model.

[Tonic/MistralMED_Chat](https://huggingface.co/Tonic/MistralMED_Chat)

```python
from transformers import AutoTokenizer, MistralForCausalLM
from peft import PeftModel
import torch
import gradio as gr

# Base model and the MistralMed PEFT adapter fine-tuned on top of it
base_model_id = "mistralai/Mistral-7B-v0.1"
adapter_id = "Tonic/mistralmed"

# Load the base model's tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")

# Load the base model, then apply the PEFT adapter on top of it.
# Pass token="hf_..." to from_pretrained only if the repository is gated
# or private; never commit a real access token to a README.
peft_model = MistralForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
peft_model = PeftModel.from_pretrained(peft_model, adapter_id)

class ChatBot:
    def __init__(self):
        self.history = None

    def predict(self, user_input):
        # Encode the user input, terminated with the end-of-sequence token
        user_input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")

        # Prepend the running chat history, if any
        if self.history is not None:
            chat_history_ids = torch.cat([self.history, user_input_ids], dim=-1)
        else:
            chat_history_ids = user_input_ids

        # Generate a response using the PEFT model
        response = peft_model.generate(input_ids=chat_history_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)

        # Keep the full sequence (prompt + reply) as the new chat history
        self.history = response

        # Decode only the newly generated tokens and return them
        response_text = tokenizer.decode(response[0][chat_history_ids.shape[-1]:], skip_special_tokens=True)
        return response_text

bot = ChatBot()

title = "👋🏻Welcome to Tonic's MistralMed Chat🚀"
description = "You can use this Space to test out the current model (MistralMed) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on Discord to build together."
examples = [["What is the boiling point of nitrogen?"]]

iface = gr.Interface(
    fn=bot.predict,
    title=title,
    description=description,
    examples=examples,
    inputs="text",
    outputs="text",
    theme="ParityError/Anime",
)

iface.launch()
```
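
To sanity-check the model without launching the Gradio UI, you can also call the bot directly. A minimal sketch, assuming the tokenizer, model, and `ChatBot` class from the snippet above have already been loaded (the question is only an illustrative example):

```python
# Direct call, bypassing the Gradio interface
bot = ChatBot()
print(bot.predict("What are the common symptoms of iron deficiency?"))
```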

## Training Details

### Training Data