Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
peter szemraj
committed on
Commit
•
bc946c2
1
Parent(s):
1bc0267
format
Browse files
app.py
CHANGED
@@ -174,7 +174,9 @@ if __name__ == "__main__":
|
|
174 |
label="num beams",
|
175 |
value=2,
|
176 |
)
|
177 |
-
gr.Markdown(
|
|
|
|
|
178 |
with gr.Row():
|
179 |
token_batch_length = gr.Radio(
|
180 |
choices=[512, 768, 1024],
|
@@ -182,7 +184,11 @@ if __name__ == "__main__":
|
|
182 |
value=512,
|
183 |
)
|
184 |
length_penalty = gr.inputs.Slider(
|
185 |
-
minimum=0.5,
|
|
|
|
|
|
|
|
|
186 |
)
|
187 |
with gr.Row():
|
188 |
repetition_penalty = gr.inputs.Slider(
|
@@ -227,9 +233,9 @@ if __name__ == "__main__":
|
|
227 |
"Summary generation should take approximately 1-2 minutes for most settings."
|
228 |
)
|
229 |
summarize_button = gr.Button(
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
|
234 |
output_text = gr.HTML("<p><em>Output will appear below:</em></p>")
|
235 |
gr.Markdown("### Summary Output")
|
|
|
174 |
label="num beams",
|
175 |
value=2,
|
176 |
)
|
177 |
+
gr.Markdown(
|
178 |
+
"_The base model is less performant than the large model, but is faster and will accept up to 2048 words per input (Large model accepts up to 768)._"
|
179 |
+
)
|
180 |
with gr.Row():
|
181 |
token_batch_length = gr.Radio(
|
182 |
choices=[512, 768, 1024],
|
|
|
184 |
value=512,
|
185 |
)
|
186 |
length_penalty = gr.inputs.Slider(
|
187 |
+
minimum=0.5,
|
188 |
+
maximum=1.0,
|
189 |
+
label="length penalty",
|
190 |
+
default=0.7,
|
191 |
+
step=0.05,
|
192 |
)
|
193 |
with gr.Row():
|
194 |
repetition_penalty = gr.inputs.Slider(
|
|
|
233 |
"Summary generation should take approximately 1-2 minutes for most settings."
|
234 |
)
|
235 |
summarize_button = gr.Button(
|
236 |
+
"Summarize!",
|
237 |
+
variant="primary",
|
238 |
+
)
|
239 |
|
240 |
output_text = gr.HTML("<p><em>Output will appear below:</em></p>")
|
241 |
gr.Markdown("### Summary Output")
|