AFischer1985 committed
Commit 125389b
1 parent: 8d8b439

Update run.py

Files changed (1):
  1. run.py +55 -8
run.py CHANGED
@@ -73,13 +73,60 @@ client = InferenceClient(
 import gradio as gr
 import json
 
-def format_prompt(message, history):
-    prompt = "<s>"
-    #for user_prompt, bot_response in history:
-    #    prompt += f"[INST] {user_prompt} [/INST]"
-    #    prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
+def extend_prompt(message="", history=None, system=None, RAGAddon=None, system2=None, zeichenlimit=None, historylimit=4, removeHTML=False):
+    startOfString=""
+    if zeichenlimit is None: zeichenlimit=1000000000 # :-)
+    template0=" [INST]{system}\n [/INST] </s>"
+    template1=" [INST] {message} [/INST]"
+    template2=" {response}</s>"
+    if("Gemma-" in modelPath): # https://huggingface.co/google/gemma-7b-it
+        template0="<start_of_turn>user{system}<end_of_turn>"
+        template1="<start_of_turn>user{message}<end_of_turn><start_of_turn>model"
+        template2="{response}<end_of_turn>"
+    if("Mixtral-8x7b-instruct" in modelPath): # https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1
+        startOfString="<s>"
+        template0=" [INST]{system}\n [/INST] </s>"
+        template1=" [INST] {message} [/INST]"
+        template2=" {response}</s>"
+    if("Mistral-7B-Instruct" in modelPath): # https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2
+        startOfString="<s>"
+        template0="[INST]{system}\n [/INST]</s>"
+        template1="[INST] {message} [/INST]"
+        template2=" {response}</s>"
+    if("Openchat-3.5" in modelPath): # https://huggingface.co/TheBloke/openchat-3.5-0106-GGUF
+        template0="GPT4 Correct User: {system}<|end_of_turn|>GPT4 Correct Assistant: Okay.<|end_of_turn|>"
+        template1="GPT4 Correct User: {message}<|end_of_turn|>GPT4 Correct Assistant: "
+        template2="{response}<|end_of_turn|>"
+    if(("Discolm_german_7b" in modelPath) or ("SauerkrautLM-7b-HerO" in modelPath)): # https://huggingface.co/VAGOsolutions/SauerkrautLM-7b-HerO
+        template0="<|im_start|>system\n{system}<|im_end|>\n"
+        template1="<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
+        template2="{response}<|im_end|>\n"
+    if("WizardLM-13B-V1.2" in modelPath): # https://huggingface.co/WizardLM/WizardLM-13B-V1.2
+        template0="{system} " #<s>
+        template1="USER: {message} ASSISTANT: "
+        template2="{response}</s>"
+    if("Phi-2" in modelPath): # https://huggingface.co/TheBloke/phi-2-GGUF
+        template0="Instruct: {system}\nOutput: Okay.\n"
+        template1="Instruct: {message}\nOutput:"
+        template2="{response}\n"
+    prompt = ""
+    if RAGAddon is not None:
+        system += RAGAddon
+    if system is not None:
+        prompt += template0.format(system=system) #"<s>"
+    if history is not None:
+        for user_message, bot_response in history[-historylimit:]:
+            if user_message is None: user_message = ""
+            if bot_response is None: bot_response = ""
+            bot_response = re.sub("\n\n<details>((.|\n)*?)</details>","", bot_response) # remove RAG components
+            if removeHTML==True: bot_response = re.sub("<(.*?)>","\n", bot_response) # remove HTML components in general (may cause bugs with markdown rendering)
+            if user_message is not None: prompt += template1.format(message=user_message[:zeichenlimit])
+            if bot_response is not None: prompt += template2.format(response=bot_response[:zeichenlimit])
+    if message is not None: prompt += template1.format(message=message[:zeichenlimit])
+    if system2 is not None:
+        prompt += system2
+    return startOfString+prompt
+
 
 def response(
     prompt, history, temperature=0.9, max_new_tokens=500, top_p=0.95, repetition_penalty=1.0,
@@ -112,7 +159,7 @@ def response(
     addon=" Bitte berücksichtige bei deiner Antwort ggf. folgende Auszüge aus unserer Datenbank, sofern sie für die Antwort erforderlich sind. Beantworte die Frage knapp und präzise. Ignoriere unpassende Datenbank-Auszüge OHNE sie zu kommentieren, zu erwähnen oder aufzulisten:\n"+"\n".join(results)
     system="Du bist ein deutschsprachiges KI-basiertes Assistenzsystem, das zu jedem Anliegen möglichst geeignete KI-Tools empfiehlt."+addon+"\n\nUser-Anliegen:"
     #body={"prompt":system+"### Instruktion:\n"+message+"\n\n### Antwort:","max_tokens":500, "echo":"False","stream":"True"} #e.g. SauerkrautLM
-    formatted_prompt = format_prompt(system+"\n"+prompt, history)
+    formatted_prompt = extend_prompt(system+"\n"+prompt, history)
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""
     for response in stream:
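The new extend_prompt generalizes the old Mixtral-only format_prompt: it selects chat templates by substring match on the module-level modelPath, prepends an optional system prompt (plus RAG addon), replays at most the last historylimit turns of the history, truncates every turn to zeichenlimit characters, and strips the <details> blocks in which the app stores RAG sources. Note that it relies on modelPath and on re being defined and imported elsewhere in run.py, outside this hunk. Below is a minimal, self-contained sketch of that dispatch-and-assemble pattern; the model id, function name, and sample dialogue are illustrative, not taken from run.py:

import re

modelPath = "mistralai/Mistral-7B-Instruct-v0.2"  # illustrative; run.py sets its own

def build_prompt(message, history, system, historylimit=4):
    # Defaults mirror the Mixtral-style templates; overridden per model family.
    start, t_sys = "", " [INST]{system}\n [/INST] </s>"
    t_user, t_bot = " [INST] {message} [/INST]", " {response}</s>"
    if "Mistral-7B-Instruct" in modelPath:
        start, t_sys = "<s>", "[INST]{system}\n [/INST]</s>"
        t_user = "[INST] {message} [/INST]"
    prompt = t_sys.format(system=system)
    for user_msg, bot_msg in history[-historylimit:]:  # replay only recent turns
        bot_msg = re.sub(r"\n\n<details>((.|\n)*?)</details>", "", bot_msg or "")  # drop RAG footnotes
        prompt += t_user.format(message=user_msg or "")
        prompt += t_bot.format(response=bot_msg)
    return start + prompt + t_user.format(message=message)

print(build_prompt(
    "Und für Bildgenerierung?",                       # "And for image generation?"
    [("Welches Tool eignet sich für Transkription?",  # "Which tool suits transcription?"
      "Whisper.")],
    "Du bist ein hilfreiches Assistenzsystem.",       # "You are a helpful assistant system."
))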
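The second hunk only swaps the call site: response now routes the German system prompt plus the RAG excerpts through extend_prompt. The system string translates roughly to "You are a German-language AI-based assistant system that recommends the most suitable AI tools for any request", and the addon instructs the model to use the database excerpts only where needed, to answer briefly and precisely, and to ignore unsuitable excerpts WITHOUT commenting on, mentioning, or listing them. A minimal sketch of consuming such a token stream with huggingface_hub's InferenceClient, assuming a placeholder model id and the generation defaults shown in the diff:

from huggingface_hub import InferenceClient

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")  # placeholder model id
generate_kwargs = dict(temperature=0.9, max_new_tokens=500, top_p=0.95, repetition_penalty=1.0)

stream = client.text_generation("[INST] Hallo! [/INST]", **generate_kwargs,
                                stream=True, details=True, return_full_text=False)
output = ""
for chunk in stream:
    # With stream=True and details=True each item wraps one generated token;
    # skipping special tokens keeps markers such as </s> out of the visible text.
    if not chunk.token.special:
        output += chunk.token.text
print(output)

One stylistic caveat in run.py itself: the loop reuses the name response for each stream chunk, shadowing the enclosing response function inside the loop body.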