pszemraj committed on
Commit
b542f3a
2 Parent(s): 3094ba9 d3f22a6

Merge branch 'main' of https://huggingface.co/spaces/pszemraj/summarize-long-text

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -40,7 +40,7 @@ def proc_submission(
40
  token_batch_length (int): the length of the token batches to use
41
  length_penalty (float): the length penalty to use
42
  repetition_penalty (float): the repetition penalty to use
43
- no_repeat_ngram_size (int): the no repeat ngram size to use
44
  max_input_length (int, optional): the maximum input length to use. Defaults to 1024.
45
 
46
  Returns:
@@ -181,13 +181,13 @@ if __name__ == "__main__":
181
 
182
  gr.Markdown("# Long-Form Summarization: LED & BookSum")
183
  gr.Markdown(
184
- "A simple demo using a fine-tuned LED model to summarize long-form text. See [model card](https://huggingface.co/pszemraj/led-large-book-summary) for a notebook with GPU inference (much faster) on Colab."
185
  )
186
  with gr.Column():
187
 
188
  gr.Markdown("## Load Inputs & Select Parameters")
189
  gr.Markdown(
190
- "Enter text below in the text area. The text will be summarized [using the selected parameters](https://huggingface.co/blog/how-to-generate). Optionally load an example below or upload a file."
191
  )
192
  with gr.Row():
193
  model_size = gr.Radio(
@@ -198,7 +198,7 @@ if __name__ == "__main__":
198
  label="Beam Search: # of Beams",
199
  value=2,
200
  )
201
- gr.Markdown("Load an example or a `.txt` file (_You may find [this OCR space](https://huggingface.co/spaces/pszemraj/pdf-ocr) useful_)")
202
  with gr.Row():
203
  example_name = gr.Dropdown(
204
  _examples,
@@ -285,7 +285,7 @@ if __name__ == "__main__":
285
  "- The two most important parameters-empirically-are the `num_beams` and `token_batch_length`. "
286
  )
287
  gr.Markdown(
288
- "- The model can be used with tag [pszemraj/led-large-book-summary](https://huggingface.co/pszemraj/led-large-book-summary). See the model card for details on usage & a notebook for a tutorial."
289
  )
290
  gr.Markdown("---")
291
 
 
40
  token_batch_length (int): the length of the token batches to use
41
  length_penalty (float): the length penalty to use
42
  repetition_penalty (float): the repetition penalty to use
43
+ no_repeat_ngram_size (int): the no-repeat ngram size to use
44
  max_input_length (int, optional): the maximum input length to use. Defaults to 1024.
45
 
46
  Returns:
 
181
 
182
  gr.Markdown("# Long-Form Summarization: LED & BookSum")
183
  gr.Markdown(
184
+ "LED models ([model card](https://huggingface.co/pszemraj/led-large-book-summary)) fine-tuned to summarize long-form text. A [space with other models can be found here](https://huggingface.co/spaces/pszemraj/document-summarization)"
185
  )
186
  with gr.Column():
187
 
188
  gr.Markdown("## Load Inputs & Select Parameters")
189
  gr.Markdown(
190
+ "Enter or upload text below, and it will be summarized [using the selected parameters](https://huggingface.co/blog/how-to-generate). "
191
  )
192
  with gr.Row():
193
  model_size = gr.Radio(
 
198
  label="Beam Search: # of Beams",
199
  value=2,
200
  )
201
+ gr.Markdown("Load a `.txt` - an example or your own (_You may find [this OCR space](https://huggingface.co/spaces/pszemraj/pdf-ocr) useful_)")
202
  with gr.Row():
203
  example_name = gr.Dropdown(
204
  _examples,
 
285
  "- The two most important parameters-empirically-are the `num_beams` and `token_batch_length`. "
286
  )
287
  gr.Markdown(
288
+ "- The model can be used with tag [pszemraj/led-large-book-summary](https://huggingface.co/pszemraj/led-large-book-summary). See the model card for details on usage & a Colab notebook for a tutorial."
289
  )
290
  gr.Markdown("---")
291