GaneshK committed
Commit 5f843d1
1 Parent(s): b4be431

Update app.py

Files changed (1)
app.py  +12 -68
app.py CHANGED
@@ -168,65 +168,8 @@
 
 
 
-# from huggingface_hub import InferenceClient
-# import gradio as gr
-# client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
-
-# def format_prompt(message, history):
-#     prompt = "<s>"
-#     for user_prompt, bot_response in history:
-#         prompt += f"[INST] {user_prompt} [/INST]"
-#         prompt += f" {bot_response}</s> "
-#     prompt += f"[INST] {message} [/INST]"
-#     return prompt
-
-# def generate(
-#     prompt, history, temperature=0.2, max_new_tokens=3000, top_p=0.95, repetition_penalty=1.0,
-# ):
-#     temperature = float(temperature)
-#     if temperature < 1e-2:
-#         temperature = 1e-2
-#     top_p = float(top_p)
-
-#     generate_kwargs = dict(
-#         temperature=temperature,
-#         max_new_tokens=max_new_tokens,
-#         top_p=top_p,
-#         repetition_penalty=repetition_penalty,
-#         do_sample=True,
-#         seed=42,
-#     )
-
-#     formatted_prompt = format_prompt(prompt, history)
-
-#     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-#     output = ""
-
-#     for response in stream:
-#         output += response.token.text
-#         yield output
-#     return output
-
-
-# mychatbot = gr.Chatbot(
-#     avatar_images=["./user.png", "./bot.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
-
-# demo = gr.ChatInterface(fn=generate,
-#                         chatbot=mychatbot,
-#                         title="Mistral-Chat",
-#                         retry_btn=None,
-#                         undo_btn=None
-#                        )
-
-# demo.queue().launch(show_api=False)
-
-
-
-
-
 from huggingface_hub import InferenceClient
-import gradio as gr
-
+import gradio as gr
 client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
 
 def format_prompt(message, history):
@@ -238,7 +181,7 @@ def format_prompt(message, history):
     return prompt
 
 def generate(
-    prompt, history, temperature=0.3, max_new_tokens=3000, top_p=0.90
+    prompt, history, temperature=0.3, max_new_tokens=3000, top_p=0.90, repetition_penalty=1.0,
 ):
     temperature = float(temperature)
     if temperature < 1e-2:
@@ -249,9 +192,9 @@ def generate(
         temperature=temperature,
         max_new_tokens=max_new_tokens,
         top_p=top_p,
-        #repetition_penalty=repetition_penalty,
-        #do_sample=True,
-        #seed=42,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=42,
     )
 
     formatted_prompt = format_prompt(prompt, history)
@@ -263,8 +206,7 @@ def generate(
         output += response.token.text
         yield output
     return output
-
-
+
 additional_inputs=[
     gr.Slider(
         label="temperature",
@@ -296,13 +238,16 @@ additional_inputs=[
     )
 ]
 
-bbchatbot = gr.Chatbot(
+
+mychatbot = gr.Chatbot(
    avatar_images=["./user.png", "./bot.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True,)
 
 demo = gr.ChatInterface(fn=generate,
-                        chatbot=bbchatbot,
+                        chatbot=mychatbot,
                         title="Mistral-Chat",
-                        additional_inputs=additional_inputs
+                        additional_inputs=additional_inputs,
+                        retry_btn=None,
+                        undo_btn=None
                        )
 
 demo.queue().launch(show_api=False)
@@ -312,4 +257,3 @@ demo.queue().launch(show_api=False)
 
 
 
-
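For reference, the format_prompt helper that generate calls (untouched by this commit, and identical to the version visible in the deleted commented-out block) assembles Mistral's [INST]-style instruction template from the chat history. A minimal sketch of the string it produces; the one-turn history and final message below are illustrative values, not from the commit:

def format_prompt(message, history):
    # Open the sequence, then replay each past turn in Mistral's instruct format.
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    # Append the new user message as the final instruction to complete.
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [("Hi", "Hello! How can I help?")]
print(format_prompt("Summarize Gradio in one line.", history))
# <s>[INST] Hi [/INST] Hello! How can I help?</s> [INST] Summarize Gradio in one line. [/INST]

The other substantive change is in generate: repetition_penalty, do_sample, and seed are uncommented in generate_kwargs, and repetition_penalty is restored to the signature. Since InferenceClient.text_generation forwards these parameters to the inference endpoint, seed=42 should make the sampled stream reproducible for a given prompt.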