Tonic committed
Commit b1cc2f7
1 Parent(s): 4cd4305

Update app.py

Files changed (1)
app.py +20 -19
app.py CHANGED
@@ -19,10 +19,10 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analy
         user_input: The user's input text to generate a response for.
         system_prompt: Optional system prompt.
     Returns:
-        A string containing the generated text.
+        A string containing the generated text in the Falcon-like format.
     """
     # Combine user input and system prompt
-    formatted_input = f"Falcon: {system_prompt} \n User: {user_input} \n Falcon:"
+    formatted_input = f"{{{{ {system_prompt} }}}}\nUser: {user_input}\nFalcon:"
 
     # Encode the input text
     encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
@@ -31,13 +31,13 @@ def multimodal_prompt(user_input, system_prompt="You are an expert medical analy
     # Generate a response using the model
     output = model.generate(
         **model_inputs,
-        max_length=max_length,
+        max_length=500,
         use_cache=True,
         early_stopping=True,
         bos_token_id=model.config.bos_token_id,
         eos_token_id=model.config.eos_token_id,
         pad_token_id=model.config.eos_token_id,
-        temperature=0.1,
+        temperature=0.4,
         do_sample=True
     )
 
@@ -64,7 +64,8 @@ tokenizer.padding_side = 'left'
 peft_config = PeftConfig.from_pretrained("Tonic/GaiaMiniMed")
 
 # Using Optimum
-peft_model.to_bettertransformer()
+model.to_bettertransformer()
+
 peft_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b-instruct")
 peft_model = PeftModel.from_pretrained(peft_model, "Tonic/GaiaMiniMed")
 
@@ -80,28 +81,28 @@ peft_model = PeftModel.from_pretrained(peft_model, "Tonic/GaiaMiniMed")
 # peft_model = PeftModel.from_pretrained(peft_model, "Tonic/mistralmed", token="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")
 
 class ChatBot:
-    def __init__(self):
-        self.history = []
-
-class ChatBot:
-    def __init__(self):
-        # Initialize the ChatBot class with an empty history
+    def __init__(self, system_prompt="You are an expert medical analyst:"):
+        self.system_prompt = system_prompt
         self.history = []
 
-    def predict(self, user_input, system_prompt="You are an expert medical analyst:"):
-        # Combine the user's input with the system prompt
-        formatted_input = f"{{{ {system_prompt} }}} \n User: {user_input} \n Falcon:"
+    def predict(self, user_input):
+        # Combine the user's input with the system prompt in Falcon format
+        formatted_input = f"{{{{ {self.system_prompt} }}}}\nUser: {user_input}\nFalcon:"
 
         # Encode the formatted input using the tokenizer
-        user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
+        input_ids = tokenizer.encode(formatted_input, return_tensors="pt", add_special_tokens=False)
 
-        # Generate a response using the PEFT model
-        response = peft_model.generate(input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
+        # Generate a response using the model
+        response = model.generate(input_ids, max_length=max_length, use_cache=True, early_stopping=True, bos_token_id=model.config.bos_token_id, eos_token_id=model.config.eos_token_id, pad_token_id=model.config.eos_token_id, temperature=0.1, do_sample=True)
 
         # Decode the generated response to text
         response_text = tokenizer.decode(response[0], skip_special_tokens=True)
-
-        return response_text  # Return the generated response
+
+        # Append the Falcon-like conversation to the history
+        self.history.append(formatted_input)
+        self.history.append(response_text)
+
+        return response_text
 
 bot = ChatBot()
 
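A note on the new prompt format: in Python f-strings, doubled braces are escapes, so each {{{{ / }}}} pair renders as two literal braces. A minimal sketch of what the committed line produces (illustrative only; the user_input value is hypothetical):

system_prompt = "You are an expert medical analyst:"
user_input = "What are the common symptoms of anemia?"  # hypothetical example input
formatted_input = f"{{{{ {system_prompt} }}}}\nUser: {user_input}\nFalcon:"
print(formatted_input)
# {{ You are an expert medical analyst: }}
# User: What are the common symptoms of anemia?
# Falcon: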
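For quick verification of the reworked ChatBot, a minimal usage sketch (not part of the commit). It assumes the module-level tokenizer and model objects from app.py are already loaded, and it defines max_length explicitly, since the new predict() reads that name but this hunk never defines it:

max_length = 500  # assumption: mirrors the value now hard-coded in multimodal_prompt()

bot = ChatBot(system_prompt="You are an expert medical analyst:")
reply = bot.predict("What are the common symptoms of anemia?")
print(reply)

# predict() appends the formatted prompt and then the decoded response,
# so the history grows by two entries per exchange.
assert len(bot.history) == 2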