Blaise-g committed on
Commit
5393edb
β€’
1 Parent(s): bc1a4e2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -6
app.py CHANGED
@@ -6,7 +6,7 @@ import gradio as gr
6
  import nltk
7
  from cleantext import clean
8
 
9
- from summarize import load_model_and_tokenizer, summarize_via_tokenbatches
10
  from utils import load_example_filenames, truncate_word_count
11
 
12
  _here = Path(__file__).parent
@@ -155,7 +155,7 @@ if __name__ == "__main__":
155
 
156
  gr.Markdown("# Automatic summarization of biomedical research papers with neural abstractive methods into a long and comprehensive synopsis or extreme TLDR summary version")
157
  gr.Markdown(
158
- "A rather simple demo using an ad-hoc fine-tuned LongT5 or LED model to summarize long biomedical articles (or any scientific text related to the biomedical domain) into a detailed or extreme TLDR version."
159
  )
160
  with gr.Column():
161
 
@@ -164,8 +164,11 @@ if __name__ == "__main__":
164
  "Enter text below in the text area. The text will be summarized [using the selected text generation parameters](https://huggingface.co/blog/how-to-generate). Optionally load an available example below or upload a file."
165
  )
166
  with gr.Row():
167
- model_size = gr.Radio(
168
- choices=["tldr", "sumpubmed"], label="Model Variant", value="sumpubmed"
 
 
 
169
  )
170
  num_beams = gr.Radio(
171
  choices=[2, 3, 4],
@@ -173,7 +176,7 @@ if __name__ == "__main__":
173
  value=2,
174
  )
175
  gr.Markdown(
176
- "_The LED model is less performant than the LongT5 model, but is faster and will accept up to 2048 words per input (Large model accepts up to 768)._"
177
  )
178
  with gr.Row():
179
  length_penalty = gr.inputs.Slider(
@@ -245,7 +248,7 @@ if __name__ == "__main__":
245
  "The summary scores can be thought of as representing the quality of the summary. less-negative numbers (closer to 0) are better:"
246
  )
247
  summary_scores = gr.Textbox(
248
- label="Summary Scores", placeholder="Summary scores will appear here"
249
  )
250
 
251
  gr.Markdown("---")
 
6
  import nltk
7
  from cleantext import clean
8
 
9
+ from summ import load_model_and_tokenizer, summarize_via_tokenbatches
10
  from utils import load_example_filenames, truncate_word_count
11
 
12
  _here = Path(__file__).parent
 
155
 
156
  gr.Markdown("# Automatic summarization of biomedical research papers with neural abstractive methods into a long and comprehensive synopsis or extreme TLDR summary version")
157
  gr.Markdown(
158
+ "A rather simple demo (developed for my Master Thesis project) using an ad-hoc fine-tuned LongT5 or LED model to summarize long biomedical articles (or any scientific text related to the biomedical domain) into a detailed, explanatory synopsis or extreme TLDR version."
159
  )
160
  with gr.Column():
161
 
 
164
  "Enter text below in the text area. The text will be summarized [using the selected text generation parameters](https://huggingface.co/blog/how-to-generate). Optionally load an available example below or upload a file."
165
  )
166
  with gr.Row():
167
+ summary_type = gr.Radio(
168
+ choices=["tldr", "detailed"], label="Summary type", value="detailed"
169
+ )
170
+ model_type = gr.Radio(
171
+ choices=["LongT5", "LED"], label="Model type", value="LongT5"
172
  )
173
  num_beams = gr.Radio(
174
  choices=[2, 3, 4],
 
176
  value=2,
177
  )
178
  gr.Markdown(
179
+ "_The LED model is less performant than the LongT5 model, but it's smaller in terms of size and therefore all other parameters being equal allows for a larger _"
180
  )
181
  with gr.Row():
182
  length_penalty = gr.inputs.Slider(
 
248
  "The summary scores can be thought of as representing the quality of the summary. less-negative numbers (closer to 0) are better:"
249
  )
250
  summary_scores = gr.Textbox(
251
+ label="Compression rate πŸ—œ", placeholder="πŸ—œ will appear here"
252
  )
253
 
254
  gr.Markdown("---")