pszemraj commited on
Commit
d3f22a6
1 Parent(s): cc0f972

update text

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -40,7 +40,7 @@ def proc_submission(
40
  token_batch_length (int): the length of the token batches to use
41
  length_penalty (float): the length penalty to use
42
  repetition_penalty (float): the repetition penalty to use
43
- no_repeat_ngram_size (int): the no repeat ngram size to use
44
  max_input_length (int, optional): the maximum input length to use. Defaults to 1024.
45
 
46
  Returns:
@@ -166,13 +166,13 @@ if __name__ == "__main__":
166
 
167
  gr.Markdown("# Long-Form Summarization: LED & BookSum")
168
  gr.Markdown(
169
- "A simple demo using a fine-tuned LED model to summarize long-form text. See [model card](https://huggingface.co/pszemraj/led-large-book-summary) for a notebook with GPU inference (much faster) on Colab."
170
  )
171
  with gr.Column():
172
 
173
  gr.Markdown("## Load Inputs & Select Parameters")
174
  gr.Markdown(
175
- "Enter text below in the text area. The text will be summarized [using the selected parameters](https://huggingface.co/blog/how-to-generate). Optionally load an example below or upload a file."
176
  )
177
  with gr.Row():
178
  model_size = gr.Radio(
@@ -183,7 +183,7 @@ if __name__ == "__main__":
183
  label="Beam Search: # of Beams",
184
  value=2,
185
  )
186
- gr.Markdown("Load an example or a `.txt` file (_You may find [this OCR space](https://huggingface.co/spaces/pszemraj/pdf-ocr) useful_)")
187
  with gr.Row():
188
  example_name = gr.Dropdown(
189
  _examples,
@@ -270,7 +270,7 @@ if __name__ == "__main__":
270
  "- The two most important parameters — empirically — are the `num_beams` and `token_batch_length`. "
271
  )
272
  gr.Markdown(
273
- "- The model can be used with tag [pszemraj/led-large-book-summary](https://huggingface.co/pszemraj/led-large-book-summary). See the model card for details on usage & a notebook for a tutorial."
274
  )
275
  gr.Markdown("---")
276
 
 
40
  token_batch_length (int): the length of the token batches to use
41
  length_penalty (float): the length penalty to use
42
  repetition_penalty (float): the repetition penalty to use
43
+ no_repeat_ngram_size (int): the no-repeat ngram size to use
44
  max_input_length (int, optional): the maximum input length to use. Defaults to 1024.
45
 
46
  Returns:
 
166
 
167
  gr.Markdown("# Long-Form Summarization: LED & BookSum")
168
  gr.Markdown(
169
+ "LED models ([model card](https://huggingface.co/pszemraj/led-large-book-summary)) fine-tuned to summarize long-form text. A [space with other models can be found here](https://huggingface.co/spaces/pszemraj/document-summarization)."
170
  )
171
  with gr.Column():
172
 
173
  gr.Markdown("## Load Inputs & Select Parameters")
174
  gr.Markdown(
175
+ "Enter or upload text below, and it will be summarized [using the selected parameters](https://huggingface.co/blog/how-to-generate). "
176
  )
177
  with gr.Row():
178
  model_size = gr.Radio(
 
183
  label="Beam Search: # of Beams",
184
  value=2,
185
  )
186
+ gr.Markdown("Load a `.txt` file — an example or your own (_You may find [this OCR space](https://huggingface.co/spaces/pszemraj/pdf-ocr) useful_)")
187
  with gr.Row():
188
  example_name = gr.Dropdown(
189
  _examples,
 
270
  "- The two most important parameters — empirically — are the `num_beams` and `token_batch_length`. "
271
  )
272
  gr.Markdown(
273
+ "- The model can be used with tag [pszemraj/led-large-book-summary](https://huggingface.co/pszemraj/led-large-book-summary). See the model card for details on usage & a Colab notebook for a tutorial."
274
  )
275
  gr.Markdown("---")
276