Paula Leonova committed on
Commit
3fbee32
1 Parent(s): 4a14080

Place summary generation under an expander

Browse files
Files changed (1) hide show
  1. app.py +22 -20
app.py CHANGED
@@ -60,26 +60,28 @@ if submit_button:
60
  st.write('Enter some text and at least one possible topic to see predictions.')
61
 
62
  with st.spinner('Generating summaries and matching labels...'):
63
- # For each body of text, create text chunks of a certain token size required for the transformer
64
- nested_sentences = create_nest_sentences(document = text_input, token_max_length = 1024)
65
-
66
- summary = []
67
- st.markdown("### Text Chunk & Summaries")
68
- st.markdown("Breaks up the original text into sections with complete sentences totaling \
69
- less than 1024 tokens, a requirement for the summarizer.")
70
-
71
- # For each chunk of sentences (within the token max), generate a summary
72
- for n in range(0, len(nested_sentences)):
73
- text_chunk = " ".join(map(str, nested_sentences[n]))
74
- st.markdown(f"###### Original Text Chunk {n+1}/{len(nested_sentences)}" )
75
- st.markdown(text_chunk)
76
-
77
- chunk_summary = summarizer_gen(summarizer, sequence=text_chunk, maximum_tokens = 300, minimum_tokens = 20)
78
- summary.append(chunk_summary)
79
- st.markdown(f"###### Partial Summary {n+1}/{len(nested_sentences)}")
80
- st.markdown(chunk_summary)
81
- # Combine all the summaries into a list and compress into one document, again
82
- final_summary = " \n\n".join(list(summary))
 
 
83
 
84
  # final_summary = summarizer_gen(summarizer, sequence=text_input, maximum_tokens = 30, minimum_tokens = 100)
85
  st.markdown("### Combined Summary")
 
60
  st.write('Enter some text and at least one possible topic to see predictions.')
61
 
62
  with st.spinner('Generating summaries and matching labels...'):
63
+ my_expander = st.expander(label='Expand to see summary generation details')
64
+ with my_expander:
65
+ # For each body of text, create text chunks of a certain token size required for the transformer
66
+ nested_sentences = create_nest_sentences(document = text_input, token_max_length = 1024)
67
+
68
+ summary = []
69
+ # st.markdown("### Text Chunk & Summaries")
70
+ st.markdown("_Breaks up the original text into sections with complete sentences totaling \
71
+ less than 1024 tokens, a requirement for the summarizer._")
72
+
73
+ # For each chunk of sentences (within the token max), generate a summary
74
+ for n in range(0, len(nested_sentences)):
75
+ text_chunk = " ".join(map(str, nested_sentences[n]))
76
+ st.markdown(f"###### Original Text Chunk {n+1}/{len(nested_sentences)}" )
77
+ st.markdown(text_chunk)
78
+
79
+ chunk_summary = summarizer_gen(summarizer, sequence=text_chunk, maximum_tokens = 300, minimum_tokens = 20)
80
+ summary.append(chunk_summary)
81
+ st.markdown(f"###### Partial Summary {n+1}/{len(nested_sentences)}")
82
+ st.markdown(chunk_summary)
83
+ # Combine all the summaries into a list and compress into one document, again
84
+ final_summary = " \n\n".join(list(summary))
85
 
86
  # final_summary = summarizer_gen(summarizer, sequence=text_input, maximum_tokens = 30, minimum_tokens = 100)
87
  st.markdown("### Combined Summary")