sethuiyer committed
Commit d1a73a9
1 Parent(s): d545865

Update README.md

Files changed (1)
  1. README.md +41 -29
README.md CHANGED
@@ -173,37 +173,49 @@ The current model demonstrates a substantial improvement over the previous [Dr.
 
 ### Usage:
 ```python
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
-# Load tokenizer and model
-tokenizer = AutoTokenizer.from_pretrained("sethuiyer/Medichat-Llama3-8B")
-model = AutoModelForCausalLM.from_pretrained("sethuiyer/Medichat-Llama3-8B").to("cuda")
-
-# Function to format and generate response with prompt engineering using a chat template
-def askme(question):
-    sys_message = '''
-    You are an AI Medical Assistant trained on a vast dataset of health information. Please be thorough and
-    provide an informative answer. If you don't know the answer to a specific medical inquiry, advise seeking professional help.
     '''
 
-    # Create messages structured for the chat template
-    messages = [{"role": "system", "content": sys_message}, {"role": "user", "content": question}]
-
-    # Applying chat template
-    prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
-    outputs = model.generate(**inputs, max_new_tokens=512, use_cache=True)  # Adjust max_new_tokens for longer responses
-
-    # Extract and return the generated text
-    answer = tokenizer.batch_decode(outputs)[0].strip()
-    return answer
-
-# Example usage
-question = '''
-Symptoms:
-Dizziness, headache and nausea.
-
-What is the differnetial diagnosis?
-'''
-print(askme(question))
 ```
 
 ### Usage:
 ```python
+import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
+class MedicalAssistant:
+    def __init__(self, model_name="sethuiyer/Medichat-Llama3-8B", device="cuda"):
+        self.device = device
+        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+        self.model = AutoModelForCausalLM.from_pretrained(model_name).to(self.device)
+        self.sys_message = '''
+        You are an AI Medical Assistant trained on a vast dataset of health information. Please be thorough and
+        provide an informative answer. If you don't know the answer to a specific medical inquiry, advise seeking professional help.
+        '''
+
+    def format_prompt(self, question):
+        messages = [
+            {"role": "system", "content": self.sys_message},
+            {"role": "user", "content": question}
+        ]
+        prompt = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+        return prompt
+
+    def generate_response(self, question, max_new_tokens=512):
+        prompt = self.format_prompt(question)
+        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.device)
+        with torch.no_grad():
+            outputs = self.model.generate(**inputs, max_new_tokens=max_new_tokens, use_cache=True)
+        answer = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)[0].strip()
+        return answer
+
+if __name__ == "__main__":
+    assistant = MedicalAssistant()
+    question = '''
+    Symptoms:
+    Dizziness, headache, and nausea.
+
+    What is the differential diagnosis?
     '''
+    response = assistant.generate_response(question)
+    print(response)
 
 ```
+
+## Ollama
+This model is now also available on Ollama. You can use it by running the command `ollama run monotykamary/medichat-llama3` in your
+terminal. If you have limited computing resources, check out this [video](https://www.youtube.com/watch?v=Qa1h7ygwQq8) to learn how to run it on
+a Google Colab backend.
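
For quick reference, here is a minimal sketch of querying the Ollama-served model programmatically (an illustration, not part of the commit above). It assumes the model has already been pulled via `ollama run monotykamary/medichat-llama3` and that the Ollama server is listening on its default port, 11434, exposing the standard `/api/chat` endpoint.

```python
import requests  # assumes the `requests` package is installed

# Send one chat turn to the locally running Ollama server.
response = requests.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "monotykamary/medichat-llama3",
        "messages": [
            {"role": "user", "content": "What is the differential diagnosis for dizziness, headache, and nausea?"}
        ],
        "stream": False,  # return a single JSON object instead of a token stream
    },
    timeout=300,
)

# The non-streaming response carries the assistant's reply under "message".
print(response.json()["message"]["content"])
```

If you prefer raw prompts over chat-formatted messages, the same request pattern works against Ollama's `/api/generate` endpoint with a `"prompt"` field instead of `"messages"`.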