Omnibus committed on
Commit
f13ca78
1 Parent(s): 422ba93

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -3
app.py CHANGED
@@ -41,7 +41,20 @@ def format_prompt_gemma(message, history,cust_p):
41
  #prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
42
  prompt+=cust_p.replace("USER_INPUT",message)
43
  return prompt
44
-
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
  def format_prompt_mixtral(message, history,cust_p):
47
  prompt = "<s>"
@@ -58,6 +71,8 @@ def format_prompt_choose(message, history, cust_p, model_name):
58
  return format_prompt_gemma(message,history,cust_p)
59
  if "mixtral" in models[model_name].lower():
60
  return format_prompt_mixtral(message,history,cust_p)
 
 
61
  else:
62
  return format_prompt_default(message,history,cust_p)
63
 
@@ -70,11 +85,12 @@ def load_models(inp):
70
  if "gemma" in models[inp].lower():
71
  prompt_out="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model"
72
  return out_box,prompt_out, model_state
73
-
74
  if "mixtral" in models[inp].lower():
75
  prompt_out="[INST] USER_INPUT [/INST]"
76
  return out_box,prompt_out, model_state
77
-
 
 
78
  else:
79
  prompt_out="USER_INPUT\n"
80
  return out_box,prompt_out, model_state
 
41
  #prompt += f"<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
42
  prompt+=cust_p.replace("USER_INPUT",message)
43
  return prompt
44
def format_prompt_openc(message, history, cust_p):
    """Build an OpenChat ("GPT4 Correct") prompt from chat history plus the new message.

    Target wire format (per the OpenChat template used elsewhere in this file):
        GPT4 Correct User: <msg><|end_of_turn|>GPT4 Correct Assistant: <reply><|end_of_turn|>...

    Args:
        message: The new user message to send to the model.
        history: Iterable of (user_prompt, bot_response) pairs; may be empty/None.
        cust_p:  Customizable prompt template containing the literal token
                 "USER_INPUT", which is replaced by `message` (e.g.
                 "GPT4 Correct User: USER_INPUT<|end_of_turn|>GPT4 Correct Assistant: ").

    Returns:
        The fully formatted prompt string.
    """
    # Fix: the original left `prompt` uninitialized (the init line was commented
    # out), so every `prompt +=` raised UnboundLocalError.
    prompt = ""
    if history:
        # Each past exchange gets the full per-turn wrappers. The original loop
        # omitted the "GPT4 Correct User: " prefix on history turns; the
        # template in the comment above requires it on every user turn.
        for user_prompt, bot_response in history:
            prompt += f"GPT4 Correct User: {user_prompt}<|end_of_turn|>"
            prompt += f"GPT4 Correct Assistant: {bot_response}<|end_of_turn|>"
        print(prompt)
    # The caller-supplied template supplies the wrappers for the new message:
    # GPT4 Correct User: Hello<|end_of_turn|>GPT4 Correct Assistant:
    prompt += cust_p.replace("USER_INPUT", message)
    return prompt
58
 
59
  def format_prompt_mixtral(message, history,cust_p):
60
  prompt = "<s>"
 
71
  return format_prompt_gemma(message,history,cust_p)
72
  if "mixtral" in models[model_name].lower():
73
  return format_prompt_mixtral(message,history,cust_p)
74
+ if "openchat" in models[model_name].lower():
75
+ return format_prompt_openc(message,history,cust_p)
76
  else:
77
  return format_prompt_default(message,history,cust_p)
78
 
 
85
  if "gemma" in models[inp].lower():
86
  prompt_out="<start_of_turn>userUSER_INPUT<end_of_turn><start_of_turn>model"
87
  return out_box,prompt_out, model_state
 
88
  if "mixtral" in models[inp].lower():
89
  prompt_out="[INST] USER_INPUT [/INST]"
90
  return out_box,prompt_out, model_state
91
+ if "openchat" in models[inp].lower():
92
+ prompt_out="GPT4 Correct User: USER_INPUT<|end_of_turn|>GPT4 Correct Assistant: "
93
+ return out_box,prompt_out, model_state
94
  else:
95
  prompt_out="USER_INPUT\n"
96
  return out_box,prompt_out, model_state