Staticaliza committed
Commit 2840ba5
1 Parent(s): e17f223

Update app.py

Files changed (1):
  app.py (+4, -6)
app.py CHANGED
@@ -15,7 +15,7 @@ model._is_chat_session_activated = False
 
 max_new_tokens = 2048
 
-def predict(input, instruction, history, temperature, top_p, top_k, rep_p, max_tokens):
+def predict(input, instruction, history, temperature, top_p, top_k, max_tokens):
 
     history = history or []
     formatted_input = "<s>"
@@ -28,11 +28,10 @@ def predict(input, instruction, history, temperature, top_p, top_k, rep_p, max_tokens):
 
     result = model.generate(
         formatted_input,
-        temperature = temperature,
+        temp = temperature,
         max_new_tokens = max_tokens,
         top_p = top_p,
         top_k = top_k,
-        repetition_penalty = rep_p,
         do_sample = True,
         stream = False,
         details = False,
@@ -54,7 +53,7 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
             history = gr.Chatbot(label = "History", elem_id = "chatbot")
-            input = gr.Textbox(label = "Input", value = "", lines = 2)
+            input = gr.Textbox(label = "Input", value = "Hi.", lines = 2)
             instruction = gr.Textbox(label = "Instruction", value = "You are an AI chatbot.", lines = 4)
             run = gr.Button("▶")
             clear = gr.Button("🗑️")
@@ -63,14 +62,13 @@ with gr.Blocks() as demo:
             temperature = gr.Slider( minimum = 0, maximum = 2, value = 1, step = 0.01, interactive = True, label = "Temperature" )
             top_p = gr.Slider( minimum = 0.01, maximum = 0.99, value = 0.95, step = 0.01, interactive = True, label = "Top P" )
             top_k = gr.Slider( minimum = 1, maximum = 2048, value = 50, step = 1, interactive = True, label = "Top K" )
-            rep_p = gr.Slider( minimum = 0.01, maximum = 2, value = 1.2, step = 0.01, interactive = True, label = "Repetition Penalty" )
             max_tokens = gr.Slider( minimum = 1, maximum = 2048, value = 32, step = 64, interactive = True, label = "Max New Tokens" )
 
     with gr.Row():
         with gr.Column():
             output = gr.Textbox(label = "Output", value = "", lines = 50)
 
-    run.click(predict, inputs = [input, instruction, history, temperature, top_p, top_k, rep_p, max_tokens], outputs = [output, input, history])
+    run.click(predict, inputs = [input, instruction, history, temperature, top_p, top_k, max_tokens], outputs = [output, input, history])
     clear.click(clear_history, [], history)
 
 demo.queue(concurrency_count = 500, api_open = True).launch(show_api = True)
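
After this commit, the generate call passes the sampling temperature through the temp keyword instead of temperature, and the repetition-penalty plumbing (the rep_p slider and the repetition_penalty argument) is removed end to end. Below is a minimal sketch of the resulting call shape; MockModel is a hypothetical stand-in, not the app's real model object, and the prompt string is illustrative since the formatting loop is outside these hunks:

    class MockModel:
        def generate(self, prompt, temp, max_new_tokens, top_p, top_k,
                     do_sample=True, stream=False, details=False):
            # Echo the settings so the keyword names are easy to check.
            return (f"prompt={prompt!r} temp={temp} max_new_tokens={max_new_tokens} "
                    f"top_p={top_p} top_k={top_k}")

    model = MockModel()
    # Values mirror the slider defaults above: temperature 1, top_p 0.95, top_k 50, 32 new tokens.
    print(model.generate("<s>Hi.", temp=1.0, max_new_tokens=32, top_p=0.95, top_k=50))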