jfelipenc committed
Commit 688da61 • 1 Parent(s): cf2e514

Updating app.py to use GaiaMiniMed's model (first try)

Files changed (1):
  1. app.py +90 -16
app.py CHANGED
Before (removed lines marked "-"):

@@ -1,19 +1,63 @@
 import random
 import time
 import gradio as gr
 
-def placeholder(input, history):
-    return "You typed: " + input
 
 class ChatbotInterface():
-    def __init__(self, name):
         self.name = name
         self.chatbot = gr.Chatbot()
         self.chat_history = []
 
         with gr.Row() as row:
             row.justify = "end"
             self.msg = gr.Textbox(scale=7)
             self.submit = gr.Button("Submit", scale=1)
 
         clear = gr.ClearButton([self.msg, self.chatbot])
@@ -22,13 +66,48 @@ class ChatbotInterface():
         self.submit.click(self.respond, [self.msg, self.chatbot], [self.msg, self.chatbot])
 
     def respond(self, msg, history):
-        bot_message = random.choice(["Hello, I'm MedChat! How can I help you?", "Hello there! I'm Medchat, a medical assistant! How can I help you?"])
-        self.chat_history.append([msg, bot_message])
-        time.sleep(1)
         return "", self.chat_history
 
 if __name__ == "__main__":
-    with gr.Blocks() as demo:
         with gr.Row() as intro:
             gr.Markdown(
                 """
@@ -47,14 +126,9 @@ if __name__ == "__main__":
         mistral_bot = ChatbotInterface("MistralMed")
         with gr.Tab("Falcon-7B") as falcon7b:
             falcon_bot = ChatbotInterface("Falcon-7B")
-
-
-        def submit_to_all(value):
-            for element in [gaia_bot, mistral_bot, falcon_bot]:
-                element.msg.value = value
-                element.submit.click()
-
-        submit_all = gr.Button("Submit to All", scale=1)
-        submit_all.click(submit_to_all, [gaia_bot.msg])
 
     demo.launch()

After (added lines marked "+"):

@@ -1,19 +1,63 @@
 import random
 import time
+import torch
 import gradio as gr
+from transformers import AutoConfig, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM, MistralForCausalLM
+from peft import PeftModel, PeftConfig
+from textwrap import wrap, fill
 
+# Functions to Wrap the Prompt Correctly
+def wrap_text(text, width=90):
+    lines = text.split('\n')
+    wrapped_lines = [fill(line, width=width) for line in lines]
+    wrapped_text = '\n'.join(wrapped_lines)
+    return wrapped_text
+
+def multimodal_prompt(user_input, system_prompt):
+    """
+    Generates text using a large language model, given a user input and a system prompt.
+    Args:
+        user_input: The user's input text to generate a response for.
+        system_prompt: Optional system prompt.
+    Returns:
+        A string containing the generated text in the Falcon-like format.
+    """
+    # Combine user input and system prompt
+    formatted_input = f"{{{{ {system_prompt} }}}}\nUser: {user_input}\nFalcon:"
+
+    # Encode the input text
+    encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
+    model_inputs = encodeds.to(device)
+
+    # Generate a response using the model
+    output = peft_model.generate(
+        **model_inputs,
+        max_length=500,
+        use_cache=True,
+        early_stopping=False,
+        bos_token_id=peft_model.config.bos_token_id,
+        eos_token_id=peft_model.config.eos_token_id,
+        pad_token_id=peft_model.config.eos_token_id,
+        temperature=0.4,
+        do_sample=True
+    )
+
+    # Decode the response
+    response_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+    return response_text
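The two helpers above are self-contained: wrap_text reflows long model output for display, and multimodal_prompt builds a Falcon-style prompt, runs generation, and decodes the result. Neither is called by the Gradio code below. A minimal usage sketch, assuming the tokenizer, peft_model, and device globals defined under __main__ further down (the question string is purely illustrative):

    # Illustrative call; requires tokenizer, peft_model, and device to exist.
    answer = multimodal_prompt(
        user_input="What are common symptoms of iron deficiency?",  # hypothetical input
        system_prompt="You are an expert medical analyst."
    )
    print(wrap_text(answer, width=90))  # reflow each line to at most 90 characters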
 
 class ChatbotInterface():
+    def __init__(self, name, system_prompt="You are an expert medical analyst that helps users with any medical related information."):
         self.name = name
+        self.system_prompt = system_prompt
         self.chatbot = gr.Chatbot()
         self.chat_history = []
 
         with gr.Row() as row:
             row.justify = "end"
             self.msg = gr.Textbox(scale=7)
+            #self.msg.change(fn=, inputs=, outputs=)
             self.submit = gr.Button("Submit", scale=1)
 
         clear = gr.ClearButton([self.msg, self.chatbot])

@@ -22,13 +66,48 @@ class ChatbotInterface():
         self.submit.click(self.respond, [self.msg, self.chatbot], [self.msg, self.chatbot])
 
     def respond(self, msg, history):
+        #bot_message = random.choice(["Hello, I'm MedChat! How can I help you?", "Hello there! I'm Medchat, a medical assistant! How can I help you?"])
+        formatted_input = f"{{{{ {self.system_prompt} }}}}\nUser: {msg}\n{self.name}:"
+        input_ids = tokenizer.encode(
+            formatted_input,
+            return_tensors="pt",
+            add_special_tokens=False
+        )
+        response = peft_model.generate(
+            input_ids=input_ids,
+            max_length=900,
+            use_cache=False,
+            early_stopping=False,
+            bos_token_id=peft_model.config.bos_token_id,
+            eos_token_id=peft_model.config.eos_token_id,
+            pad_token_id=peft_model.config.eos_token_id,
+            temperature=0.4,
+            do_sample=True
+        )
+        response_text = tokenizer.decode(response[0], skip_special_tokens=True)
+
+        self.chat_history.append([formatted_input, response_text])
+
         return "", self.chat_history
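Unlike multimodal_prompt above, respond never moves input_ids to device. As committed this still runs, because the weights are also loaded without device placement (see the __main__ block below), so model and inputs both stay on the CPU; but if the model were moved to a GPU, the inputs would have to follow. A sketch of that adjustment, under the same assumptions:

    # Sketch: keep inputs on the same device as the model.
    input_ids = tokenizer.encode(formatted_input, return_tensors="pt", add_special_tokens=False)
    response = peft_model.generate(input_ids=input_ids.to(device), max_length=900,
                                   temperature=0.4, do_sample=True)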
 
 if __name__ == "__main__":
+    # Define the device
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+
+    # Use the base model's ID
+    base_model_id = "tiiuae/falcon-7b-instruct"
+    model_directory = "Tonic/GaiaMiniMed"
+
+    # Instantiate the Tokenizer
+    tokenizer = AutoTokenizer.from_pretrained(base_model_id, trust_remote_code=True, padding_side="left")
+
+    # Specify the configuration class for the model
+    model_config = AutoConfig.from_pretrained(base_model_id)
+    # Load the PEFT model with the specified configuration
+    peft_model = AutoModelForCausalLM.from_pretrained(model_directory, config=model_config)
+    peft_model = PeftModel.from_pretrained(peft_model, model_directory)
+
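Two things stand out in this loading block: base_model_id is used only for the tokenizer and config, while the weights are read twice from Tonic/GaiaMiniMed (once as a causal LM, once as a PEFT adapter wrapped around it), and the computed device is never applied to the model. If Tonic/GaiaMiniMed held only adapter weights for the Falcon base, the more conventional PEFT recipe would look like this sketch (an assumption about the repository layout, not a description of it):

    # Sketch of the usual PEFT pattern: base weights from the base checkpoint,
    # adapter weights from the fine-tuned repo, then explicit device placement.
    base = AutoModelForCausalLM.from_pretrained(base_model_id, trust_remote_code=True)
    peft_model = PeftModel.from_pretrained(base, model_directory).to(device)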
+    with gr.Blocks() as demo:
         with gr.Row() as intro:
             gr.Markdown(
                 """
@@ -47,14 +126,9 @@ if __name__ == "__main__":
         mistral_bot = ChatbotInterface("MistralMed")
         with gr.Tab("Falcon-7B") as falcon7b:
             falcon_bot = ChatbotInterface("Falcon-7B")
+
+        gaia_bot.msg.change(fn=lambda s: (s[::1], s[::1]), inputs=gaia_bot.msg, outputs=[mistral_bot.msg, falcon_bot.msg])
+        mistral_bot.msg.change(fn=lambda s: (s[::1], s[::1]), inputs=mistral_bot.msg, outputs=[gaia_bot.msg, falcon_bot.msg])
+        falcon_bot.msg.change(fn=lambda s: (s[::1], s[::1]), inputs=falcon_bot.msg, outputs=[gaia_bot.msg, mistral_bot.msg])
 
     demo.launch()
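Two caveats on the mirroring block for anyone adapting it: s[::1] is an identity slice, so each lambda simply copies the text into the two sibling textboxes; and because a textbox's .change event also fires when a peer writes into it, the three handlers can re-trigger one another. A sketch that avoids the feedback loop, assuming a Gradio version that exposes the user-edit-only .input event:

    def mirror(s):
        # explicit identity mapping: same text for both sibling textboxes
        return s, s

    # .input fires only on user keystrokes, not on programmatic updates,
    # so mirrored writes do not re-trigger the other listeners.
    gaia_bot.msg.input(mirror, inputs=gaia_bot.msg, outputs=[mistral_bot.msg, falcon_bot.msg])
    mistral_bot.msg.input(mirror, inputs=mistral_bot.msg, outputs=[gaia_bot.msg, falcon_bot.msg])
    falcon_bot.msg.input(mirror, inputs=falcon_bot.msg, outputs=[gaia_bot.msg, mistral_bot.msg])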