PEFT
English
medical
Tonic committed on
Commit 1845a8d • 1 Parent(s): 4fee1a9

Update README.md

Files changed (1)
  1. README.md +5 -43
README.md CHANGED
@@ -73,8 +73,6 @@ import gradio as gr
 import random
 from textwrap import wrap

-
-# Functions to Wrap the Prompt Correctly
 def wrap_text(text, width=90):
     lines = text.split('\n')
     wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
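One thing to flag in this hunk: the helper calls `textwrap.fill`, but the only related import visible in the diff is `from textwrap import wrap`, so unless `import textwrap` appears earlier in the README the function raises `NameError`. The `wrapped_text` it returns is also assigned on a line that falls between the two hunks and isn't shown. A minimal self-contained sketch, assuming the helper is meant to rejoin the wrapped lines:

```python
import textwrap  # module import, so textwrap.fill resolves

def wrap_text(text, width=90):
    # Wrap each line to `width` columns while preserving existing line breaks.
    lines = text.split('\n')
    wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
    return '\n'.join(wrapped_lines)
```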
@@ -82,22 +80,11 @@ def wrap_text(text, width=90):
     return wrapped_text

 def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
-    """
-    Generates text using a large language model, given a user input and a system prompt.
-    Args:
-        user_input: The user's input text to generate a response for.
-        system_prompt: Optional system prompt.
-    Returns:
-        A string containing the generated text.
-    """
-    # Combine user input and system prompt
     formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"

-    # Encode the input text
     encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
     model_inputs = encodeds.to(device)

-    # Generate a response using the model
     output = model.generate(
         **model_inputs,
         max_length=max_length,
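Note that the snippet only ever loads `peft_model` (see the next hunk), and `max_length` is never assigned anywhere visible, so `model.generate(**model_inputs, max_length=max_length, ...)` would raise `NameError` when `multimodal_prompt` runs. A sketch of a self-contained version, assuming the tokenizer, `device`, and PEFT model defined below; the 512-token cap is borrowed from `ChatBot.predict`, not from this README, and the sampling arguments hidden between the two hunks are elided:

```python
def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:", max_length=512):
    # Mistral-style instruction prompt: <s>[INST] ... [/INST]
    formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"
    encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
    model_inputs = encodeds.to(device)
    output = peft_model.generate(  # the loaded model, not the undefined `model`
        **model_inputs,
        max_length=max_length,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)
```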
@@ -110,31 +97,19 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analy
         do_sample=True
     )

-    # Decode the response
     response_text = tokenizer.decode(output[0], skip_special_tokens=True)

     return response_text

-# Define the device
 device = "cuda" if torch.cuda.is_available() else "cpu"

-# Use the base model's ID
 base_model_id = "mistralai/Mistral-7B-v0.1"
 model_directory = "Tonic/mistralmed"

-# Instantiate the Tokenizer
 tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True, padding_side="left")
-# tokenizer = AutoTokenizer.from_pretrained("Tonic/mistralmed", trust_remote_code=True, padding_side="left")
 tokenizer.pad_token = tokenizer.eos_token
 tokenizer.padding_side = 'left'

-# Specify the configuration class for the model
-#model_config = AutoConfig.from_pretrained(base_model_id)
-
-# Load the PEFT model with the specified configuration
-#peft_model = AutoModelForCausalLM.from_pretrained(base_model_id, config=model_config)
-
-# Load the PEFT model
 peft_config = PeftConfig.from_pretrained("Tonic/mistralmed", token="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")
 peft_model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True)
 peft_model = PeftModel.from_pretrained(peft_model, "Tonic/mistralmed", token="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")
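Two notes on this loading block: `peft_config` is created but never used (`PeftModel.from_pretrained` fetches the adapter config from the repo on its own), and a real access token is hardcoded into the README; anyone reusing this snippet should treat that token as leaked and pass their own from the environment. A sketch of the same loading pattern, assuming an `HF_TOKEN` environment variable (that name is this sketch's convention, not the README's):

```python
import os
import torch
from transformers import AutoTokenizer, MistralForCausalLM
from peft import PeftModel

device = "cuda" if torch.cuda.is_available() else "cpu"
hf_token = os.environ.get("HF_TOKEN")  # keep credentials out of the README

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", padding_side="left")
tokenizer.pad_token = tokenizer.eos_token

# Base weights first, then the MistralMed PEFT adapter on top.
base = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
peft_model = PeftModel.from_pretrained(base, "Tonic/mistralmed", token=hf_token)
peft_model.to(device)
peft_model.eval()
```

The README itself never moves `peft_model` to `device`, while `multimodal_prompt` moves its inputs there; on a GPU machine that mismatch fails at generation time.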
@@ -144,40 +119,27 @@ class ChatBot:
         self.history = []

     def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
-        # Combine user input and system prompt
         formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"

-        # Encode user input
         user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")

-        # Concatenate the user input with chat history
-        if len(self.history) > 0:
-            chat_history_ids = torch.cat([self.history, user_input_ids], dim=-1)
-        else:
-            chat_history_ids = user_input_ids
-
-        # Generate a response using the PEFT model
-        response = peft_model.generate(input_ids=chat_history_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
-
-        # Update chat history
-        self.history = chat_history_ids
+        response = peft_model.generate(input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)

-        # Decode and return the response
         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
         return response_text

 bot = ChatBot()

-title = "👋🏻Welcome to Tonic's MistralMed Chat🚀"
-description = "You can use this Space to test out the current model (MistralMed) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on Discord to build together."
-examples = [["What is the proper treatment for buccal herpes?", "Please provide information on the most effective antiviral medications and home remedies for treating buccal herpes."]]
+title = "👋🏻토닉의 미스트랄메드 채팅에 오신 것을 환영합니다🚀👋🏻Welcome to Tonic's MistralMed Chat🚀"
+description = "이 공간을 사용하여 현재 모델을 테스트할 수 있습니다. [(Tonic/MistralMed)](https://huggingface.co/Tonic/MistralMed) 또는 이 공간을 복제하고 로컬 또는 🤗HuggingFace에서 사용할 수 있습니다. [Discord에서 함께 만들기 위해 Discord에 가입하십시오](https://discord.gg/VqTxc76K3u). You can use this Space to test out the current model [(Tonic/MistralMed)](https://huggingface.co/Tonic/MistralMed) or duplicate this Space and use it locally or on 🤗HuggingFace. [Join me on Discord to build together](https://discord.gg/VqTxc76K3u)."
+examples = [["[Question:] What is the proper treatment for buccal herpes?", "You are a medicine and public health expert, you will receive a question, answer the question, and complete the answer"]]

 iface = gr.Interface(
     fn=bot.predict,
     title=title,
     description=description,
     examples=examples,
-    inputs=["text", "text"], # Take user input and system prompt separately
+    inputs=["text", "text"],
     outputs="text",
     theme="ParityError/Anime"
 )
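The functional change in this final hunk: `predict` no longer threads `self.history` into the prompt. The removed code accumulated every turn into `self.history` and regenerated over the whole concatenation, which quickly runs into the 512-token `max_length` cap; the new version treats each call as a stateless single turn, leaving `self.history` unused. The two Gradio `inputs` map onto `predict`'s two arguments. A usage sketch, assuming the module-level code above has been run:

```python
# Direct call, bypassing the UI: first the user message, then the system prompt.
print(bot.predict(
    "What is the proper treatment for buccal herpes?",
    "You are a medicine and public health expert.",
))

# Or serve the two-textbox web UI that gr.Interface wires to bot.predict.
iface.launch()
```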
 