ashhadahsan committed on
Commit
6f595b5
•
1 Parent(s): 1cb713d

Add application file

app.py ADDED
@@ -0,0 +1,123 @@
+ import streamlit as st
+ import pandas as pd
+ from transformers import pipeline
+ from stqdm import stqdm
+ from simplet5 import SimpleT5
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+
+ # Cache heavyweight objects so they load once per session, not on every rerun.
+ # allow_output_mutation=True because models and pipelines are not hashable.
+ @st.cache(allow_output_mutation=True)
+ def load_t5():
+     model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
+     tokenizer = AutoTokenizer.from_pretrained("t5-base")
+     return model, tokenizer
+
+
+ @st.cache(allow_output_mutation=True)
+ def custom_model():
+     return pipeline("summarization", model="my_awesome_sum/")
+
+
+ @st.cache
+ def convert_df(df):
+     # IMPORTANT: Cache the conversion to prevent computation on every rerun
+     return df.to_csv().encode("utf-8")
+
+
+ # SimpleT5 instances are not hashable; hash them by identity so st.cache works.
+ @st.cache(allow_output_mutation=True, hash_funcs={SimpleT5: id})
+ def load_one_line_summarizer(model):
+     return model.load_model("t5", "snrspeaks/t5-one-line-summary")
+
+
+ st.set_page_config(layout="wide", page_title="Amazon Review Summarizer")
+ st.title("Amazon Review Summarizer")
+
+ uploaded_file = st.file_uploader("Choose a file", type=["xlsx", "xls", "csv"])
+ summarizer_option = st.selectbox(
+     "Select Summarizer",
+     ("Custom trained on the dataset", "t5-base", "t5-one-line-summary"),
+ )
+ ps = st.empty()
+ if st.button("Process"):
+     if uploaded_file is not None:
+         # The uploader accepts CSV as well as Excel, so choose the reader
+         # by extension instead of always calling read_excel.
+         if uploaded_file.name.endswith(".csv"):
+             df = pd.read_csv(uploaded_file)
+         else:
+             df = pd.read_excel(uploaded_file)
+         df.columns = [x.lower() for x in df.columns.values.tolist()]
+         if summarizer_option == "Custom trained on the dataset":
+             model = custom_model()
+             text = df["text"].values.tolist()
+             ps.text("Summarization in progress. Please wait.")
+             summary = []
+             for x in stqdm(range(len(text))):
+                 try:
+                     summary.append(
+                         model(
+                             f"summarize: {text[x]}", max_length=50, early_stopping=True
+                         )[0]["summary_text"]
+                     )
+                 except Exception:
+                     # Keep the text and summary columns the same length.
+                     summary.append("")
+             output = pd.DataFrame({"text": text, "summary": summary})
+             csv = convert_df(output)
+             st.download_button(
+                 label="Download data as CSV",
+                 data=csv,
+                 file_name=f"{summarizer_option}_df.csv",
+                 mime="text/csv",
+             )
+         if summarizer_option == "t5-base":
+             model, tokenizer = load_t5()
+             text = df["text"].values.tolist()
+             n = min(10, len(text))  # demo run on the first 10 rows at most
+             summary = []
+             for x in stqdm(range(n)):
+                 tokens_input = tokenizer.encode(
+                     "summarize: " + text[x],
+                     return_tensors="pt",
+                     max_length=tokenizer.model_max_length,
+                     truncation=True,
+                 )
+                 summary_ids = model.generate(
+                     tokens_input,
+                     min_length=80,
+                     max_length=150,
+                     length_penalty=20,
+                     num_beams=2,
+                 )
+                 summary_gen = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+                 summary.append(summary_gen)
+             output = pd.DataFrame({"text": text[:n], "summary": summary})
+             csv = convert_df(output)
+             st.download_button(
+                 label="Download data as CSV",
+                 data=csv,
+                 file_name=f"{summarizer_option}_df.csv",
+                 mime="text/csv",
+             )
+         if summarizer_option == "t5-one-line-summary":
+             model = SimpleT5()
+             text = df["text"].values.tolist()
+             load_one_line_summarizer(model=model)
+             n = min(10, len(text))
+             summary = []
+             for x in stqdm(range(n)):
+                 try:
+                     summary.append(model.predict(text[x])[0])
+                 except Exception:
+                     summary.append("")
+             output = pd.DataFrame({"text": text[:n], "summary": summary})
+             csv = convert_df(output)
+             st.download_button(
+                 label="Download data as CSV",
+                 data=csv,
+                 file_name=f"{summarizer_option}_df.csv",
+                 mime="text/csv",
+             )
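
For a quick sanity check outside Streamlit, the fine-tuned checkpoint can be exercised directly. A minimal sketch, assuming my_awesome_sum/ contains the files committed here; the review string is a made-up example:

    # Call the fine-tuned checkpoint the same way app.py does, minus the UI.
    from transformers import pipeline

    summarizer = pipeline("summarization", model="my_awesome_sum/")
    review = "Great blender, easy to clean, but the lid seal started leaking after two weeks."
    result = summarizer(f"summarize: {review}", max_length=50, early_stopping=True)
    print(result[0]["summary_text"])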
my_awesome_sum/config.json ADDED
@@ -0,0 +1,60 @@
+ {
+   "_name_or_path": "t5-small",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "relu",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": false,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 6,
+   "num_heads": 8,
+   "num_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.25.1",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
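
The task_specific_params block above is not inert metadata: when the checkpoint is loaded through pipeline("summarization"), transformers merges the stored summarization defaults (the "summarize: " prefix, num_beams=4, min_length=30, max_length=200) into the generation settings, and per-call arguments override them. A short sketch under that assumption, with a made-up review string:

    # Stored summarization defaults apply unless overridden at call time.
    from transformers import pipeline

    summarizer = pipeline("summarization", model="my_awesome_sum/")
    review = "The headphones sound great and the battery lasts all week, though the case feels flimsy."
    default_out = summarizer(review)               # num_beams=4, min_length=30, max_length=200
    short_out = summarizer(review, max_length=60)  # per-call override wins
    print(default_out[0]["summary_text"], short_out[0]["summary_text"], sep="\n")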
my_awesome_sum/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7360fd249c0844a6561fe1cb3c09b6ef3c298e30010f82af7756a3f3a194e334
+ size 242069785
my_awesome_sum/runs/Jan31_20-01-20_DESKTOP-KLKCBUF/1675177280.385954/events.out.tfevents.1675177280.DESKTOP-KLKCBUF.8104.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dcdf3676c2ab95a48e0e327785fbf42e3c328a92ef353b685a73021a40e8ef5a
+ size 5732
my_awesome_sum/runs/Jan31_20-01-20_DESKTOP-KLKCBUF/events.out.tfevents.1675177280.DESKTOP-KLKCBUF.8104.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5767b704d6b1c5e43aa1fa1a7865fb4c2c87ba7a3e2c43d3ef893f44a74b158
+ size 4645
my_awesome_sum/runs/Jan31_20-16-04_DESKTOP-KLKCBUF/1675178164.489649/events.out.tfevents.1675178164.DESKTOP-KLKCBUF.8104.3 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09f9f0f707a08678a79dbe9c2d767c4fa8155eece008196df1d0f372810d3009
+ size 5732
my_awesome_sum/runs/Jan31_20-16-04_DESKTOP-KLKCBUF/events.out.tfevents.1675178164.DESKTOP-KLKCBUF.8104.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:91b07b8c6dab1cdae47dc001457b325083398d7fc1be04a8c7346536fa823558
+ size 5160
my_awesome_sum/runs/Jan31_20-40-37_DESKTOP-KLKCBUF/1675179637.5629804/events.out.tfevents.1675179637.DESKTOP-KLKCBUF.8408.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7321bd13cc6053881a99fee797b79f1b1a2ce1534de079cd7833897c855bc110
+ size 5732
my_awesome_sum/runs/Jan31_20-40-37_DESKTOP-KLKCBUF/events.out.tfevents.1675179637.DESKTOP-KLKCBUF.8408.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:34ae99c27bbdb92be4abcff90dfc322810a923ca2c6610b24800a1d3dc967086
+ size 6190
my_awesome_sum/runs/Jan31_21-41-54_DESKTOP-KLKCBUF/1675183314.294505/events.out.tfevents.1675183314.DESKTOP-KLKCBUF.13020.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba1f7366c32892d4febbf4f3116ec76d40132effe0c90102f9c3a3d0871ce9e0
+ size 5732
my_awesome_sum/runs/Jan31_21-41-54_DESKTOP-KLKCBUF/events.out.tfevents.1675183314.DESKTOP-KLKCBUF.13020.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e00a06db943cbe4cc33313ba15d494509a5321a54bfa6bba0bb7d293954257cf
+ size 7053
my_awesome_sum/special_tokens_map.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
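
The hundred <extra_id_0> through <extra_id_99> entries are T5's sentinel tokens, used to mark masked spans during span-corruption pretraining; each maps to a single id at the top of the 32,128-token vocabulary. A quick check, assuming the tokenizer files from this commit (the stock t5-small tokenizer behaves identically):

    # Sentinel tokens tokenize to single ids near the top of the vocab.
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("my_awesome_sum/")
    print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))  # 32099 for t5-small
    print(tokenizer("The <extra_id_0> walks in <extra_id_1> park").input_ids)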
my_awesome_sum/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
my_awesome_sum/tokenizer_config.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "extra_ids": 100,
+   "model_max_length": 512,
+   "name_or_path": "t5-small",
+   "pad_token": "<pad>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "T5Tokenizer",
+   "unk_token": "<unk>"
+ }
my_awesome_sum/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:27a2729479e1b383a832c67d41adbc899ddea624695c5d9463aebab5cb899292
+ size 3515
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ openpyxl
+ pandas
+ simplet5
+ streamlit==1.11.0
+ transformers==4.16.2
+ stqdm
+ torch
+ bert-extractive-summarizer