AFischer1985 committed
Commit 634803e
Parent: 73c9156

update extend_prompt

Files changed (1): run.py (+19 -5)
run.py CHANGED
@@ -2,7 +2,7 @@
 # Title: Gradio AI-Interface with Memory-RAG
 # Author: Andreas Fischer
 # Date: October 15th, 2023
-# Last update: March 1st, 2024
+# Last update: May 27th, 2024
 ##########################################################################################

 #https://github.com/abetlen/llama-cpp-python/issues/306
@@ -139,12 +139,22 @@ def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=
     template0=" [INST]{system}\n [/INST] </s>"
     template1=" [INST] {message} [/INST]"
     template2=" {response}</s>"
+  if("command-r" in modelPath): #https://huggingface.co/CohereForAI/c4ai-command-r-v01
+    ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
+    template0="<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|> {system}<|END_OF_TURN_TOKEN|>"
+    template1="<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{message}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
+    template2="{response}<|END_OF_TURN_TOKEN|>"
   if("Gemma-" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
     template0="<start_of_turn>user{system}</end_of_turn>"
     template1="<start_of_turn>user{message}</end_of_turn><start_of_turn>model"
-    template2="{response}</end_of_turn>"
-  if("Mixtral-8x7b-instruct" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+    template2="{response}</end_of_turn>"
+  if("Mixtral-8x22B-Instruct" in modelPath): # AutoTokenizer: <s>[INST] U1[/INST] A1</s>[INST] U2[/INST] A2</s>
     startOfString="<s>"
+    template0="[INST]{system}\n [/INST] </s>"
+    template1="[INST] {message}[/INST]"
+    template2=" {response}</s>"
+  if("Mixtral-8x7b-instruct" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+    startOfString="<s>" # AutoTokenizer: <s> [INST] U1 [/INST]A1</s> [INST] U2 [/INST]A2</s>
     template0=" [INST]{system}\n [/INST] </s>"
     template1=" [INST] {message} [/INST]"
     template2=" {response}</s>"
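For reference, here is how the command-r templates above compose into a full prompt. This is a minimal sketch, not code from run.py; it assumes extend_prompt emits template0 once, then template1/template2 per history turn, and finally template1 for the new message, as the surrounding fragments suggest.

    # Minimal sketch: assemble a command-r prompt from the three templates added above.
    # build_prompt is a hypothetical helper, not a function in run.py.
    template0 = "<BOS_TOKEN><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|> {system}<|END_OF_TURN_TOKEN|>"
    template1 = "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>{message}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>"
    template2 = "{response}<|END_OF_TURN_TOKEN|>"

    def build_prompt(system, history, message):
        prompt = template0.format(system=system)              # system turn, carries <BOS_TOKEN>
        for user_message, bot_response in history:            # replay past turns
            prompt += template1.format(message=user_message)
            prompt += template2.format(response=bot_response)
        prompt += template1.format(message=message)           # new turn, ends at <|CHATBOT_TOKEN|>
        return prompt

    print(build_prompt("You are a helpful assistant.", [], "Hello, how are you?"))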
@@ -160,7 +170,11 @@ def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=
   if(("Discolm_german_7b" in modelPath) or ("SauerkrautLM-7b-HerO" in modelPath)): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
     template0="<|im_start|>system\n{system}<|im_end|>\n"
     template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
-    template2="{response}<|im_end|>\n"
+    template2="{response}<|im_end|>\n"
+  if("Llama-3-SauerkrautLM-8b-Instruct" in modelPath): #https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
+    template0="<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system}<|eot_id|>"
+    template1="<|start_header_id|>user<|end_header_id|>\n\n{message}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
+    template2="{response}<|eot_id|>\n"
   if("WizardLM-13B-V1.2" in modelPath): #https://huggingface.co/WizardLM/WizardLM-13B-V1.2
     template0="{system} " #<s>
     template1="USER: {message} ASSISTANT: "
@@ -178,7 +192,7 @@ def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=
   for user_message, bot_response in history[-historylimit:]:
     if user_message is None: user_message = ""
     if bot_response is None: bot_response = ""
-    bot_response = re.sub("\n\n<details>((.|\n)*?)</details>","", bot_response) # remove RAG-components
+    bot_response = re.sub("\n\n<details>.*?</details>","", bot_response, flags=re.DOTALL) # remove RAG-components
     if removeHTML==True: bot_response = re.sub("<(.*?)>","\n", bot_response) # remove HTML-components in general (may cause bugs with markdown-rendering)
     if user_message is not None: prompt += template1.format(message=user_message[:zeichenlimit])
     if bot_response is not None: prompt += template2.format(response=bot_response[:zeichenlimit])
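The regex change in the final hunk is behavior-preserving: a lazy .*? under re.DOTALL matches across newlines just like the old ((.|\n)*?) group, without the nested alternation. A quick check with a made-up sample string (not from run.py):

    # Both patterns strip the same <details>...</details> block; re.DOTALL lets '.'
    # match newlines, so the '(.|\n)' alternation is no longer needed.
    import re

    text = "Answer\n\n<details>\n<summary>RAG sources</summary>\nchunk 1\n</details>"
    old = re.sub("\n\n<details>((.|\n)*?)</details>", "", text)
    new = re.sub("\n\n<details>.*?</details>", "", text, flags=re.DOTALL)
    assert old == new == "Answer"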
 