Tonic committed on
Commit
d402103
1 Parent(s): 34723ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -29,7 +29,7 @@ class IntelChatBot:
29
  prompt = f"### System:\n {self.system_message}\n ### User:\n{user_message}\n### System:\n"
30
  return prompt
31
 
32
- def predict(self, user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample):
33
  prompt = self.format_prompt(user_message)
34
  inputs = self.tokenizer(prompt, return_tensors='pt', add_special_tokens=False)
35
  input_ids = inputs["input_ids"].to(self.model.device)
@@ -50,14 +50,14 @@ class IntelChatBot:
50
  response = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
51
  return response
52
 
53
- def gradio_predict(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample):
54
  Intel_bot.set_system_message(system_message)
55
  if not do_sample:
56
  max_length = 780
57
  temperature = 0.9
58
  top_p = 0.9
59
  repetition_penalty = 0.9
60
- response = Intel_bot.predict(user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample)
61
  return response
62
 
63
  Intel_bot = IntelChatBot(model, tokenizer)
@@ -83,7 +83,7 @@ with gr.Blocks(theme = "ParityError/Anime") as demo:
83
  output_text = gr.Textbox(label="🧠🤌🏻NeuralChat Response")
84
 
85
  def process(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample):
86
- return gradio_predict(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample)
87
 
88
  submit_button.click(
89
  process,
 
29
  prompt = f"### System:\n {self.system_message}\n ### User:\n{user_message}\n### System:\n"
30
  return prompt
31
 
32
+ def Neuro(self, user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample):
33
  prompt = self.format_prompt(user_message)
34
  inputs = self.tokenizer(prompt, return_tensors='pt', add_special_tokens=False)
35
  input_ids = inputs["input_ids"].to(self.model.device)
 
50
  response = self.tokenizer.decode(output_ids[0], skip_special_tokens=True)
51
  return response
52
 
53
+ def gradio_neuro(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample):
54
  Intel_bot.set_system_message(system_message)
55
  if not do_sample:
56
  max_length = 780
57
  temperature = 0.9
58
  top_p = 0.9
59
  repetition_penalty = 0.9
60
+ response = Intel_bot.neuro(user_message, temperature, max_new_tokens, top_p, repetition_penalty, do_sample)
61
  return response
62
 
63
  Intel_bot = IntelChatBot(model, tokenizer)
 
83
  output_text = gr.Textbox(label="🧠🤌🏻NeuralChat Response")
84
 
85
  def process(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample):
86
+ return gradio_neuro(user_message, system_message, max_new_tokens, temperature, top_p, repetition_penalty, do_sample)
87
 
88
  submit_button.click(
89
  process,