min width
app.py CHANGED
@@ -69,7 +69,15 @@ with gr.Blocks() as demo:
     )
 
     with gr.Row():
-        with gr.Column():
+        with gr.Column(scale=4):
+            user_text = gr.Textbox(
+                placeholder="A sequence: one, two, three, ",
+                label="Prompt"
+            )
+            model_output = gr.Textbox(label="Model output", lines=10, interactive=False)
+            button_submit = gr.Button(value="Submit")
+
+        with gr.Column(scale=1, min_width=200):
             gr.Markdown("### Generation Settings")
             use_assistant = gr.Checkbox(label="Use Assisted Generation", value=True)
             max_new_tokens = gr.Slider(
@@ -78,17 +86,9 @@ with gr.Blocks() as demo:
             temperature = gr.Slider(
                 minimum=0.0, maximum=2.0, value=0.0, step=0.1, interactive=True, label="Temperature (0.0 = Greedy)",
             )
-        with gr.Column():
             gr.Markdown("### Generation time (seconds)")
             generation_time = gr.Textbox(lines=1, interactive=False, show_label=False)
 
-    user_text = gr.Textbox(
-        placeholder="A sequence: one, two, three, ",
-        label="Prompt"
-    )
-    model_output = gr.Textbox(label="Model output", lines=10, interactive=False)
-    button_submit = gr.Button(value="Submit")
-
     generate_inputs = [user_text, use_assistant, temperature, max_new_tokens]
     generate_outputs = [model_output, generation_time]
     user_text.submit(run_generation, generate_inputs, generate_outputs)
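For context, below is a minimal sketch of the Blocks layout this change produces: the prompt, model output, and submit button move into a wide column (scale=4) next to a narrow settings column (scale=1) that never shrinks below 200 px. The run_generation stub, the max_new_tokens slider bounds, and the button_submit.click wiring are not shown in this diff and are assumptions added only so the snippet runs on its own.

# Minimal sketch of the layout after this change (not the Space's full app.py).
# Assumptions, not taken from the diff: the run_generation stub below, the
# max_new_tokens slider bounds, and the button_submit.click wiring.
import time

import gradio as gr


def run_generation(user_text, use_assistant, temperature, max_new_tokens):
    # Stub: echo the prompt so the layout can be previewed without a model.
    start = time.time()
    output = f"{user_text}[stubbed output, assisted={use_assistant}, temp={temperature}, max_new_tokens={max_new_tokens}]"
    return output, f"{time.time() - start:.3f}"


with gr.Blocks() as demo:
    with gr.Row():
        # Wide column: prompt, model output, and submit button (about 4x the
        # width of the settings column).
        with gr.Column(scale=4):
            user_text = gr.Textbox(
                placeholder="A sequence: one, two, three, ",
                label="Prompt",
            )
            model_output = gr.Textbox(label="Model output", lines=10, interactive=False)
            button_submit = gr.Button(value="Submit")

        # Narrow column: generation settings, never narrower than 200 px.
        with gr.Column(scale=1, min_width=200):
            gr.Markdown("### Generation Settings")
            use_assistant = gr.Checkbox(label="Use Assisted Generation", value=True)
            max_new_tokens = gr.Slider(
                minimum=1, maximum=500, value=100, step=1, interactive=True, label="Max new tokens",  # assumed bounds
            )
            temperature = gr.Slider(
                minimum=0.0, maximum=2.0, value=0.0, step=0.1, interactive=True, label="Temperature (0.0 = Greedy)",
            )
            gr.Markdown("### Generation time (seconds)")
            generation_time = gr.Textbox(lines=1, interactive=False, show_label=False)

    generate_inputs = [user_text, use_assistant, temperature, max_new_tokens]
    generate_outputs = [model_output, generation_time]
    user_text.submit(run_generation, generate_inputs, generate_outputs)
    button_submit.click(run_generation, generate_inputs, generate_outputs)  # assumed wiring

if __name__ == "__main__":
    demo.launch()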