Leyo committed on
Commit e3795af
1 Parent(s): 5b1ab48

fix default values

Files changed (1)
  1. app_dialogue.py +15 -15
app_dialogue.py CHANGED
@@ -303,7 +303,7 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
         top_p = gr.Slider(
             minimum=0.0,
             maximum=1.0,
-            value=0.7,
+            value=0.95,
             step=0.1,
             interactive=True,
             label="Top P",
@@ -326,7 +326,7 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
         )
         repetition_penalty = gr.Slider(
             minimum=0.0,
-            maximum=10.0,
+            maximum=5.0,
             value=1.0,
             step=0.1,
             interactive=True,
@@ -342,7 +342,7 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
         )
         length_penalty = gr.Slider(
             minimum=0.0,
-            maximum=10.0,
+            maximum=5.0,
             value=1.0,
             step=0.1,
             interactive=True,
@@ -358,9 +358,9 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
         )
         penalty_alpha = gr.Slider(
             minimum=0.0,
-            maximum=10.0,
+            maximum=5.0,
             value=0.95,
-            step=1.0,
+            step=0.05,
             interactive=True,
             label="Penalty alpha",
         )
@@ -435,16 +435,16 @@ with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
     def model_inference(
         user_prompt,
         chat_history,
-        decoding_strategy="greedy",
-        temperature=1.0,
-        no_repeat_ngram_size=0,
-        max_new_tokens=512,
-        min_length=16,
-        repetition_penalty=1.0,
-        length_penalty=1.0,
-        top_k=50,
-        top_p=0.95,
-        penalty_alpha=0.95,
+        decoding_strategy,
+        temperature,
+        no_repeat_ngram_size,
+        max_new_tokens,
+        min_length,
+        repetition_penalty,
+        length_penalty,
+        top_k,
+        top_p,
+        penalty_alpha,
     ):
         global processor, model, tokenizer
         # temperature = 1.0
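Net effect of the commit: the hard-coded defaults on model_inference are removed, and each gr.Slider's value= becomes the single source of truth for the defaults shown in the UI. Below is a minimal sketch of how such sliders are typically wired to the handler through an event's inputs list; the Textbox/Chatbot component names and the stub handler body are illustrative assumptions, not code from this Space.

import gradio as gr

# Hypothetical stub standing in for the Space's real model_inference:
# after this commit the sampling parameters arrive positionally,
# with no Python-side defaults.
def model_inference(user_prompt, chat_history, top_p, repetition_penalty,
                    length_penalty, penalty_alpha):
    reply = (f"(stub) top_p={top_p}, repetition_penalty={repetition_penalty}, "
             f"length_penalty={length_penalty}, penalty_alpha={penalty_alpha}")
    return "", chat_history + [(user_prompt, reply)]

with gr.Blocks(title="IDEFICS", theme=gr.themes.Base()) as demo:
    chatbot = gr.Chatbot()                    # assumed component name
    user_prompt = gr.Textbox(label="Prompt")  # assumed component name
    # The sliders now own the defaults via value=..., matching the diff above.
    top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, step=0.1,
                      interactive=True, label="Top P")
    repetition_penalty = gr.Slider(minimum=0.0, maximum=5.0, value=1.0,
                                   step=0.1, interactive=True,
                                   label="Repetition penalty")
    length_penalty = gr.Slider(minimum=0.0, maximum=5.0, value=1.0, step=0.1,
                               interactive=True, label="Length penalty")
    penalty_alpha = gr.Slider(minimum=0.0, maximum=5.0, value=0.95, step=0.05,
                              interactive=True, label="Penalty alpha")
    # Gradio forwards the current value of every component in `inputs`
    # on each call, so defaults on the handler signature never take effect.
    user_prompt.submit(
        model_inference,
        inputs=[user_prompt, chatbot, top_p, repetition_penalty,
                length_penalty, penalty_alpha],
        outputs=[user_prompt, chatbot],
    )

demo.launch()

Because the inputs list always supplies every slider's current value, the signature defaults could never be seen by users anyway, and keeping both copies invites drift: here the slider showed top_p 0.7 while the signature default was 0.95. The commit settles on 0.95 in the slider and drops the signature defaults entirely.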
 
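For context on what these controls feed into: a hedged sketch of how such parameters are typically passed to transformers' generate() for an IDEFICS checkpoint. The checkpoint name, prompt, and device handling are assumptions, not taken from this commit. Note that penalty_alpha combined with top_k > 1 selects contrastive search in transformers, which plausibly motivates shrinking its step from 1.0 to 0.05 and capping its range at 5.0.

import torch
from transformers import AutoProcessor, IdeficsForVisionText2Text

checkpoint = "HuggingFaceM4/idefics-9b-instruct"  # assumed checkpoint
processor = AutoProcessor.from_pretrained(checkpoint)
model = IdeficsForVisionText2Text.from_pretrained(
    checkpoint, torch_dtype=torch.bfloat16, device_map="auto"
)

# A text-only prompt for illustration; IDEFICS prompts can also
# interleave images.
prompts = [["User: What is the capital of France?\nAssistant:"]]
inputs = processor(prompts, return_tensors="pt").to(model.device)

generated_ids = model.generate(
    **inputs,
    max_new_tokens=512,
    min_length=16,
    no_repeat_ngram_size=0,
    repetition_penalty=1.0,  # slider range is now 0.0-5.0
    length_penalty=1.0,      # slider range is now 0.0-5.0
    top_k=50,
    penalty_alpha=0.95,      # with top_k > 1 this enables contrastive search
)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])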