Ctaake committed on
Commit
7fa2dc3
1 Parent(s): 21f0218

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -2
app.py CHANGED
@@ -33,7 +33,15 @@ if checkpoint in mistral_models:
33
  chat_template = chat_template.replace(' ', '').replace('\n', '')
34
  tokenizer.chat_template = chat_template
35
 
36
-
 
 
 
 
 
 
 
 
37
 
38
  def format_prompt_cohere(message, chatbot, system_prompt=SYSTEM_PROMPT):
39
  messages = [{"role": "system", "content": system_prompt}]
@@ -70,7 +78,7 @@ match checkpoint:
70
  case "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO":
71
  format_prompt=format_prompt_nous
72
  case "mistralai/Mixtral-8x7B-Instruct-v0.1":
73
- format_prompt=format_prompt_cohere
74
 
75
  def inference(message, history, temperature=0.9, maxTokens=512, topP=0.9, repPenalty=1.1):
76
  # Updating the settings for the generation
 
33
  chat_template = chat_template.replace(' ', '').replace('\n', '')
34
  tokenizer.chat_template = chat_template
35
 
36
def format_prompt_mistral(message, chatbot, system_prompt=SYSTEM_PROMPT+SYSTEM_PROMPT_NOUS):
    """Render a Mistral-style chat prompt from the running conversation.

    Builds the message list (system prompt, alternating user/assistant turns
    from *chatbot*, then the new *message*) and delegates formatting to the
    tokenizer's chat template.

    Args:
        message: The latest user message to append.
        chatbot: Sequence of (user_message, bot_message) pairs of prior turns.
        system_prompt: System instruction placed first; defaults to the
            concatenation of SYSTEM_PROMPT and SYSTEM_PROMPT_NOUS.

    Returns:
        The templated prompt string (tokenize=False keeps it as text).
    """
    messages = [{"role": "system", "content": system_prompt}]
    for user_turn, assistant_turn in chatbot:
        messages.extend([
            {"role": "user", "content": user_turn},
            {"role": "assistant", "content": assistant_turn},
        ])
    messages.append({"role": "user", "content": message})
    # add_generation_prompt=True appends the assistant cue so the model
    # continues as the assistant.
    return tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True, return_tensors="pt"
    )
45
 
46
  def format_prompt_cohere(message, chatbot, system_prompt=SYSTEM_PROMPT):
47
  messages = [{"role": "system", "content": system_prompt}]
 
78
  case "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO":
79
  format_prompt=format_prompt_nous
80
  case "mistralai/Mixtral-8x7B-Instruct-v0.1":
81
+ format_prompt=format_prompt_mistral
82
 
83
  def inference(message, history, temperature=0.9, maxTokens=512, topP=0.9, repPenalty=1.1):
84
  # Updating the settings for the generation