Tonic committed on
Commit
3202d1b
Parent: e2e23fc

Update app.py

Files changed (1)
  1. app.py +13 -14
app.py CHANGED
@@ -7,26 +7,25 @@ from transformers import AutoConfig, AutoTokenizer, AutoModelForSeq2SeqLM, AutoM
 from peft import PeftModel, PeftConfig
 import torch
 import gradio as gr
-# Functions to Wrap the Prompt Correctly
 
+# Functions to Wrap the Prompt Correctly
 def wrap_text(text, width=90):
     lines = text.split('\n')
     wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
     wrapped_text = '\n'.join(wrapped_lines)
     return wrapped_text
 
-def multimodal_prompt(input_text, system_prompt="", max_length=512):
+def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
     """
-    Generates text using a large language model, given a prompt and a device.
+    Generates text using a large language model, given a user input and a system prompt.
     Args:
-        input_text: The input text to generate a response for.
+        user_input: The user's input text to generate a response for.
         system_prompt: Optional system prompt.
-        max_length: Maximum length of the generated text.
     Returns:
         A string containing the generated text.
     """
-    # Modify the input text to include the desired format
-    formatted_input = f"""<s>[INST]{input_text}[/INST]"""
+    # Combine user input and system prompt
+    formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"
 
     # Encode the input text
     encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
@@ -50,7 +49,6 @@ def multimodal_prompt(input_text, system_prompt="", max_length=512):
 
     return response_text
 
-
 # Define the device
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -64,7 +62,6 @@ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_rem
 tokenizer.pad_token = tokenizer.eos_token
 tokenizer.padding_side = 'left'
 
-
 # Specify the configuration class for the model
 #model_config = AutoConfig.from_pretrained(base_model_id)
 
@@ -80,9 +77,12 @@ class ChatBot:
     def __init__(self):
         self.history = []
 
-    def predict(self, input_text):
+    def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
+        # Combine user input and system prompt
+        formatted_input = f"<s>[INST]{system_prompt} {user_input}[/INST]"
+
        # Encode user input
-        user_input_ids = tokenizer.encode(input_text, return_tensors="pt")
+        user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
 
         # Concatenate the user input with chat history
         if len(self.history) > 0:
@@ -104,15 +104,14 @@ bot = ChatBot()
 
 title = "👋🏻Welcome to Tonic's MistralMed Chat🚀"
 description = "You can use this Space to test out the current model (MistralMed) or duplicate this Space and use it for any other model on 🤗HuggingFace. Join me on Discord to build together."
-examples = [["<s>[INST] What is the proper treatment for bucal herpes?[/INST]"]]
-
+examples = [["What is the proper treatment for buccal herpes?"]]
 
 iface = gr.Interface(
     fn=bot.predict,
     title=title,
     description=description,
     examples=examples,
-    inputs="text",
+    inputs=["text", "text"],  # Take user input and system prompt separately
     outputs="text",
     theme="ParityError/Anime"
 )
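
The commit threads a system prompt through both entry points by wrapping it, together with the user input, in a single Mistral-style [INST] instruction block. A minimal sketch of that wrapping, runnable on its own (the helper name format_mistral_prompt is illustrative, not part of the commit):

# Sketch of the prompt wrapping this commit standardizes on. The f-string
# mirrors formatted_input in multimodal_prompt() and ChatBot.predict();
# the function name here is hypothetical, for illustration only.
def format_mistral_prompt(user_input, system_prompt="You are an expert medical analyst:"):
    # System prompt and user input share one [INST]...[/INST] block.
    return f"<s>[INST]{system_prompt} {user_input}[/INST]"

print(format_mistral_prompt("What is the proper treatment for buccal herpes?"))
# -> <s>[INST]You are an expert medical analyst: What is the proper treatment for buccal herpes?[/INST]

Because gr.Interface now declares inputs=["text", "text"], Gradio passes the two text fields positionally as (user_input, system_prompt) into bot.predict, matching the new signature; the example string is likewise stripped of [INST] markers, since predict now adds them itself.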