piecurus committed on
Commit
0789c2e
1 Parent(s): 201579f
Files changed (1) hide show
  1. app.py +122 -0
app.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Streamlit app for extractive and abstractive text summarization.

Accepts raw text, a URL, or an uploaded .txt/.pdf/.docx file and produces
either an extractive summary (via the local ``Summarizer``) or an
abstractive one (via a BART ``summarization`` pipeline).
"""
import nltk
import validators
import streamlit as st
from transformers import AutoTokenizer, pipeline

# local modules
from extractive_summarizer.model_processors import Summarizer
from utils import (
    clean_text,
    fetch_article_text,
    preprocess_text_for_abstractive_summarization,
    read_text_from_file,
)

if __name__ == "__main__":
    # ---------------------------------
    # Main Application
    # ---------------------------------
    st.title("Text Summarizer 📝")

    st.markdown("Creator: [Atharva Ingle](https://github.com/Gladiator07)")
    st.markdown(
        "Source code: [GitHub Repository](https://github.com/Gladiator07/Text-Summarizer)"
    )
    summarize_type = st.sidebar.selectbox(
        "Summarization type", options=["Extractive", "Abstractive"]
    )

    st.markdown(
        "Enter a text or a url to get a concise summary of the article while conserving the overall meaning. This app supports text in the following formats:"
    )
    st.markdown(
        """- Raw text in text box
- URL of article/news to be summarized
- .txt, .pdf, .docx file formats"""
    )
    st.markdown(
        """This app supports two type of summarization:

1. **Extractive Summarization**: The extractive approach involves picking up the most important phrases and lines from the documents. It then combines all the important lines to create the summary. So, in this case, every line and word of the summary actually belongs to the original document which is summarized.
2. **Abstractive Summarization**: The abstractive approach involves rephrasing the complete document while capturing the complete meaning of the document. This type of summarization provides more human-like summary"""
    )
    st.markdown("---")
    # ---------------------------
    # SETUP & Constants
    nltk.download("punkt")
    abs_tokenizer_name = "facebook/bart-large-cnn"
    abs_model_name = "facebook/bart-large-cnn"
    abs_tokenizer = AutoTokenizer.from_pretrained(abs_tokenizer_name)
    abs_max_length = 90
    abs_min_length = 30
    # ---------------------------

    inp_text = st.text_input("Enter text or a url here")
    st.markdown(
        "<h3 style='text-align: center; color: green;'>OR</h3>",
        unsafe_allow_html=True,
    )
    uploaded_file = st.file_uploader(
        "Upload a .txt, .pdf, .docx file for summarization"
    )

    # validators.url() returns True for a valid URL but a *falsy
    # ValidationFailure object* (NOT the bool False) otherwise, so it must be
    # normalized to a plain bool — the original `is_url is False` identity
    # check could never be true and silently skipped chunking below.
    is_url = bool(validators.url(inp_text))
    if is_url:
        # complete text, chunks to summarize (list of sentences for long docs)
        text, clean_txt = fetch_article_text(url=inp_text)
    elif uploaded_file:
        clean_txt = read_text_from_file(uploaded_file)
        clean_txt = clean_text(clean_txt)
    else:
        clean_txt = clean_text(inp_text)

    # view input text (expander); for URLs clean_txt is a list of chunks,
    # otherwise a single string
    with st.expander("View input text"):
        if is_url:
            st.write(clean_txt[0])
        else:
            st.write(clean_txt)
    summarize = st.button("Summarize")

    # called on toggle button [summarize]
    if summarize:
        # guard: nothing to summarize (empty text box, no file, bad URL)
        if not clean_txt:
            st.warning("Please enter some text, a URL or upload a file first.")
            st.stop()

        if summarize_type == "Extractive":
            # the extractive summarizer expects one flat string
            if is_url:
                text_to_summarize = " ".join(clean_txt)
            else:
                text_to_summarize = clean_txt

            with st.spinner(
                text="Creating extractive summary. This might take a few seconds ..."
            ):
                ext_model = Summarizer()
                summarized_text = ext_model(text_to_summarize, num_sentences=5)

        elif summarize_type == "Abstractive":
            with st.spinner(
                text="Creating abstractive summary. This might take a few seconds ..."
            ):
                text_to_summarize = clean_txt
                abs_summarizer = pipeline(
                    "summarization", model=abs_model_name, tokenizer=abs_tokenizer_name
                )

                if not is_url:
                    # plain text / file input: split into tokenizer-sized
                    # chunks (URL input already arrives pre-chunked)
                    text_to_summarize = preprocess_text_for_abstractive_summarization(
                        tokenizer=abs_tokenizer, text=clean_txt
                    )

                tmp_sum = abs_summarizer(
                    text_to_summarize,
                    max_length=abs_max_length,
                    min_length=abs_min_length,
                    do_sample=False,
                )

                summarized_text = " ".join(summ["summary_text"] for summ in tmp_sum)

        # final summarized output
        st.subheader("Summarized text")
        st.info(summarized_text)