Pinu12ka4 committed on
Commit 7d50134 · 1 Parent(s): 9e3c1a9

Upload 4 files

Files changed (4)
  1. GUI.py +66 -0
  2. README.md +56 -3
  3. main.ipynb +410 -0
  4. sentiment-emotion-labelled_Dell_tweets.csv +0 -0
GUI.py ADDED
@@ -0,0 +1,66 @@
+ from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QLineEdit,
+                              QVBoxLayout, QPushButton, QComboBox)
+ from PyQt5.QtGui import QFont
+ import tensorflow as tf
+ import numpy as np
+ import tensorflow_hub as hub
+ import tensorflow_text as text  # registers the ops needed by the saved BERT preprocessing layer
+
+ print("Loading models...")
+ # hub.KerasLayer must be passed as a custom object so Keras can rebuild the saved TF Hub layers.
+ Model2 = tf.keras.models.load_model("./Bert_uncased_model_Tiwtter.h5",
+                                     custom_objects={'KerasLayer': hub.KerasLayer})
+ Model1 = tf.keras.models.load_model("./Bert_uncased_model_Reddit.h5",
+                                     custom_objects={'KerasLayer': hub.KerasLayer})
+ classes = ['Neutral', 'Positive', 'Negative']
+
+ def show_message():
+     """Classify the text in the input box with the selected model and show the result."""
+     message = input_box.text()
+     selected_option = dropdown.currentIndex()
+     model = Model2 if selected_option == 0 else Model1  # item 0 = Twitter model, item 1 = Reddit model
+     ans = model.predict([message])
+     i = np.argmax(ans)
+     category = classes[i]
+     percentage = str(int(ans[0][i] * 100)) + " %"
+     label.setText(category + " - " + percentage)
+
+ app = QApplication([])
+ window = QWidget()
+ window.setWindowTitle("Sentiment Analysis")
+ window.setFixedSize(600, 300)  # Fixed window size
+
+ layout = QVBoxLayout()
+
+ label1 = QLabel("Enter a text:")
+ label1.setFont(QFont("Arial", 14))  # Larger font size
+ layout.addWidget(label1)
+
+ dropdown = QComboBox()
+ dropdown.addItem("Bert uncased model Twitter (Model 2)")
+ dropdown.addItem("Bert uncased model Reddit (Model 1)")
+ dropdown.setFont(QFont("Arial", 12))
+ layout.addWidget(dropdown)
+
+ input_box = QLineEdit()
+ input_box.setFont(QFont("Arial", 14))
+ layout.addWidget(input_box)
+
+ button = QPushButton("Classify")
+ button.clicked.connect(show_message)
+ button.setFont(QFont("Arial", 14))
+ layout.addWidget(button)
+
+ layout.addStretch()
+
+ label = QLabel("")
+ label.setFont(QFont("Arial", 12))
+ layout.addWidget(label)
+ label.setContentsMargins(150, 0, 250, 0)  # Left/right margins to centre the result text
+
+
+ layout.addStretch()  # Stretchable space at the end to centre-align the widgets
+
+ window.setLayout(layout)
+ window.show()
+ app.exec_()
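For quick testing without Qt, the same load-and-predict path can be exercised from a plain script. This is a minimal sketch mirroring GUI.py above; the example sentence is made up, and the model file name and `custom_objects` argument are the ones used in this commit.

```python
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text  # noqa: F401  (registers ops required by the saved BERT preprocessing layer)

classes = ['Neutral', 'Positive', 'Negative']

# As in GUI.py, hub.KerasLayer must be supplied as a custom object when loading the .h5 file.
model = tf.keras.models.load_model(
    "./Bert_uncased_model_Tiwtter.h5",
    custom_objects={'KerasLayer': hub.KerasLayer})

# The model takes raw strings, so a plain Python list of text works as input.
scores = model.predict(["the new laptop arrived quickly and works great"])  # hypothetical example text
best = int(np.argmax(scores[0]))
print(classes[best], f"{scores[0][best]:.0%}")
```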
README.md CHANGED
@@ -1,3 +1,56 @@
- ---
- license: apache-2.0
- ---
+
+ # Sentiment Analysis with GUI
+
+ A deep learning model for sentiment analysis. It reaches up to 85% accuracy and was trained on 25,000 text samples.
+
+ # Neural Network Info
+ A BERT layer sits at the second position in the network, right after the input layer. Three GRU layers handle feature extraction,
+ followed by a Conv1D layer; the output is then flattened and passed through a stack of dense layers.
+
+
+ ## Info
+
+ 1) "Bert_uncased_model_Tiwtter.h5" reaches up to 85% accuracy in just 30 epochs, with a loss of 0.51. This model is trained purely on a Twitter dataset.
+ ![SS1](https://github.com/somnathdashs/Sentiment-Analysis/blob/main/SS/Screenshot%202023-07-06%20215459.png?raw=true)
+
+ 2) "Bert_uncased_model_Reddit.h5" reaches up to 84% accuracy in just 35 epochs, with a loss of 0.81. This model is not trained purely on the Twitter dataset; part of a Reddit dataset is also used.
+ ![SS1](https://github.com/somnathdashs/Sentiment-Analysis/blob/main/SS/Screenshot%202023-07-06%20212910.png?raw=true)
+
+ Loss :- sparse_categorical_crossentropy
+
+ Activation on last layer :- softmax
+
+ Note: Max input length is 768 words.
+
+
+ ## Screenshots
+
+ ![Preview](https://github.com/somnathdashs/Sentiment-Analysis/blob/main/SS/Screenshot%202023-07-09%20103349.png?raw=true)
+
+ ![Preview](https://github.com/somnathdashs/Sentiment-Analysis/blob/main/SS/Screenshot%202023-07-09%20103417.png?raw=true)
+
+ ![Preview](https://github.com/somnathdashs/Sentiment-Analysis/blob/main/SS/Screenshot%202023-07-09%20103425.png?raw=true)
+
+ ![Preview](https://github.com/somnathdashs/Sentiment-Analysis/blob/main/SS/Screenshot%202023-07-09%20103455.png?raw=true)
+
+ ![Preview](https://github.com/somnathdashs/Sentiment-Analysis/blob/main/SS/Screenshot%202023-07-09%20104740.png?raw=true)
+
+
+ ## Libraries Used
+
+ ##### > opencv
+ ##### > tensorflow
+ ##### > numpy
+ ##### > pickle
+ ##### > bert
+ ##### > tensorflow_hub
+ ##### > tensorflow_text
+
+
+
+ ## Authors
+
+ - [@Somnath Dash](https://www.github.com/somnathdashs)
+
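For readers who want the architecture described in the README as code, here is a minimal sketch. It condenses the model-building cell from main.ipynb below (dropout layers omitted); the layer sizes and TF Hub handles are the ones used in that notebook, not a definitive spec of the uploaded .h5 files.

```python
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_text  # noqa: F401  (registers the ops used by the preprocessing layer)
from tensorflow.keras import layers

# BERT preprocessing + encoder directly after the string input, as described above.
bert_preprocess = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
bert_encoder = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4")

text_in = layers.Input(shape=(), dtype=tf.string, name="text")
seq = bert_encoder(bert_preprocess(text_in))["sequence_output"]

# GRU layers for feature extraction, then Conv1D, flatten, and a stack of dense layers.
x = layers.Bidirectional(layers.GRU(128, return_sequences=True))(seq)
x = layers.Bidirectional(layers.GRU(64, return_sequences=True))(x)
x = layers.Conv1D(64, 1, activation="relu", padding="same")(x)
x = layers.Flatten()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dense(64, activation="relu")(x)
out = layers.Dense(3, activation="softmax")(x)  # neutral / positive / negative

model = tf.keras.Model(text_in, out)
model.compile("adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
```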
main.ipynb ADDED
@@ -0,0 +1,410 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import tensorflow as tf\n",
+     "import numpy as np,sklearn,os,cv2,pandas\n",
+     "import tensorflow_hub as hub\n",
+     "import tensorflow_text as text\n",
+     "from tensorflow.keras import Model,layers\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/html": [
+        "<div>\n",
+        "<style scoped>\n",
+        "    .dataframe tbody tr th:only-of-type {\n",
+        "        vertical-align: middle;\n",
+        "    }\n",
+        "\n",
+        "    .dataframe tbody tr th {\n",
+        "        vertical-align: top;\n",
+        "    }\n",
+        "\n",
+        "    .dataframe thead th {\n",
+        "        text-align: right;\n",
+        "    }\n",
+        "</style>\n",
+        "<table border=\"1\" class=\"dataframe\">\n",
+        "  <thead>\n",
+        "    <tr style=\"text-align: right;\">\n",
+        "      <th></th>\n",
+        "      <th>Unnamed: 0</th>\n",
+        "      <th>Datetime</th>\n",
+        "      <th>Tweet Id</th>\n",
+        "      <th>Text</th>\n",
+        "      <th>Username</th>\n",
+        "      <th>sentiment</th>\n",
+        "      <th>sentiment_score</th>\n",
+        "      <th>emotion</th>\n",
+        "      <th>emotion_score</th>\n",
+        "    </tr>\n",
+        "  </thead>\n",
+        "  <tbody>\n",
+        "    <tr>\n",
+        "      <th>0</th>\n",
+        "      <td>0</td>\n",
+        "      <td>2022-09-30 23:29:15+00:00</td>\n",
+        "      <td>1575991191170342912</td>\n",
+        "      <td>@Logitech @apple @Google @Microsoft @Dell @Len...</td>\n",
+        "      <td>ManjuSreedaran</td>\n",
+        "      <td>neutral</td>\n",
+        "      <td>0.853283</td>\n",
+        "      <td>anticipation</td>\n",
+        "      <td>0.587121</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>1</th>\n",
+        "      <td>1</td>\n",
+        "      <td>2022-09-30 21:46:35+00:00</td>\n",
+        "      <td>1575965354425131008</td>\n",
+        "      <td>@MK_habit_addict @official_stier @MortalKombat...</td>\n",
+        "      <td>MiKeMcDnet</td>\n",
+        "      <td>neutral</td>\n",
+        "      <td>0.519470</td>\n",
+        "      <td>joy</td>\n",
+        "      <td>0.886913</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>2</th>\n",
+        "      <td>2</td>\n",
+        "      <td>2022-09-30 21:18:02+00:00</td>\n",
+        "      <td>1575958171423752203</td>\n",
+        "      <td>As @CRN celebrates its 40th anniversary, Bob F...</td>\n",
+        "      <td>jfollett</td>\n",
+        "      <td>positive</td>\n",
+        "      <td>0.763791</td>\n",
+        "      <td>joy</td>\n",
+        "      <td>0.960347</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>3</th>\n",
+        "      <td>3</td>\n",
+        "      <td>2022-09-30 20:05:24+00:00</td>\n",
+        "      <td>1575939891485032450</td>\n",
+        "      <td>@dell your customer service is horrible especi...</td>\n",
+        "      <td>daveccarr</td>\n",
+        "      <td>negative</td>\n",
+        "      <td>0.954023</td>\n",
+        "      <td>anger</td>\n",
+        "      <td>0.983203</td>\n",
+        "    </tr>\n",
+        "    <tr>\n",
+        "      <th>4</th>\n",
+        "      <td>4</td>\n",
+        "      <td>2022-09-30 20:03:17+00:00</td>\n",
+        "      <td>1575939359160750080</td>\n",
+        "      <td>@zacokalo @Dell @DellCares @Dell give the man ...</td>\n",
+        "      <td>heycamella</td>\n",
+        "      <td>neutral</td>\n",
+        "      <td>0.529170</td>\n",
+        "      <td>anger</td>\n",
+        "      <td>0.776124</td>\n",
+        "    </tr>\n",
+        "  </tbody>\n",
+        "</table>\n",
+        "</div>"
+       ],
+       "text/plain": [
+        "   Unnamed: 0                   Datetime             Tweet Id  \\\n",
+        "0           0  2022-09-30 23:29:15+00:00  1575991191170342912   \n",
+        "1           1  2022-09-30 21:46:35+00:00  1575965354425131008   \n",
+        "2           2  2022-09-30 21:18:02+00:00  1575958171423752203   \n",
+        "3           3  2022-09-30 20:05:24+00:00  1575939891485032450   \n",
+        "4           4  2022-09-30 20:03:17+00:00  1575939359160750080   \n",
+        "\n",
+        "                                                Text        Username  \\\n",
+        "0  @Logitech @apple @Google @Microsoft @Dell @Len...  ManjuSreedaran   \n",
+        "1  @MK_habit_addict @official_stier @MortalKombat...      MiKeMcDnet   \n",
+        "2  As @CRN celebrates its 40th anniversary, Bob F...        jfollett   \n",
+        "3  @dell your customer service is horrible especi...       daveccarr   \n",
+        "4  @zacokalo @Dell @DellCares @Dell give the man ...      heycamella   \n",
+        "\n",
+        "  sentiment  sentiment_score       emotion  emotion_score  \n",
+        "0   neutral         0.853283  anticipation       0.587121  \n",
+        "1   neutral         0.519470           joy       0.886913  \n",
+        "2  positive         0.763791           joy       0.960347  \n",
+        "3  negative         0.954023         anger       0.983203  \n",
+        "4   neutral         0.529170         anger       0.776124  "
+       ]
+      },
+      "execution_count": 2,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "df=pandas.read_csv(\"./sentiment-emotion-labelled_Dell_tweets.csv\")\n",
+     "df.head()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 3,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "array(['neutral', 'positive', 'negative'], dtype=object)"
+       ]
+      },
+      "execution_count": 3,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "X,Y=df.Text,df.sentiment\n",
+     "Y.unique()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "Classes=['neutral', 'positive', 'negative']"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 5,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "((24970,), (24970,))"
+       ]
+      },
+      "execution_count": 5,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "X.shape,Y.shape"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 6,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "X=np.array(X)\n",
+     "Y=np.array(Y)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 15,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "bert_preprocess = hub.KerasLayer(\n",
+     "    \"https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3\")\n",
+     "bert_encoder = hub.KerasLayer(\n",
+     "    \"https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/4\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 8,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "((24970,), (24970,))"
+       ]
+      },
+      "execution_count": 8,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "New_Y=[]\n",
+     "for i,j in enumerate(Y):\n",
+     "    label=-2\n",
+     "    for k,l in enumerate(Classes):\n",
+     "        if l==j:\n",
+     "            label=k\n",
+     "    New_Y.append(label)\n",
+     "New_Y=np.array(New_Y)\n",
+     "New_Y.shape,X.shape\n",
+     "# 0 => Neutral and 1 => positive and 2 => negative"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 9,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from sklearn.model_selection import train_test_split as tts\n",
+     "X_train, X_test, y_train, y_test = tts(X,New_Y,test_size=0.2, random_state=42,shuffle=True)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 10,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "((19976,), (4994,), (19976,), (4994,))"
+       ]
+      },
+      "execution_count": 10,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "X_train.shape,X_test.shape,y_train.shape,y_test.shape"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 11,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "(\"@Dell Apparently your company doesn't think that a swollen battery on a laptop that isn't even a year old is an issue. You are not honoring your warranty and overall putting me in the position where I am a walking fire hazard which as a customer for years now isn't right.\",\n",
+        " 2)"
+       ]
+      },
+      "execution_count": 11,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "X_train[0],y_train[0]"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 12,
+    "metadata": {},
+    "outputs": [
+     {
+      "ename": "NameError",
+      "evalue": "name 'bert_preprocess' is not defined",
+      "output_type": "error",
+      "traceback": [
+       "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+       "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
+       "\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_12248\\1158974034.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[0mtextinput\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mlayers\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mInput\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mdtype\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtf\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstring\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mname\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"text\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mpre\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mbert_preprocess\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtextinput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[0mend\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mbert_encoder\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpre\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;31m# NN Layer\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mpo\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mend\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m\"sequence_output\"\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;31m#[\"pooled_output\"]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+       "\u001b[1;31mNameError\u001b[0m: name 'bert_preprocess' is not defined"
+      ]
+     }
+    ],
+    "source": [
+     "textinput=layers.Input(shape=(),dtype=tf.string,name=\"text\")\n",
+     "pre=bert_preprocess(textinput)\n",
+     "end=bert_encoder(pre)\n",
+     "# NN Layer\n",
+     "po=end[\"sequence_output\"] #[\"pooled_output\"]\n",
+     "l=layers.Bidirectional(layers.GRU(128, return_sequences=True))(po)\n",
+     "l=layers.Bidirectional(layers.GRU(64, return_sequences=True))(l)\n",
+     "l=layers.Conv1D(64, 1, activation=\"relu\", padding='same')(l)\n",
+     "l=layers.Flatten()(l)\n",
+     "l=layers.Dropout(0.2)(l)\n",
+     "l=layers.Dense(128,activation=\"relu\",name=\"input\")(l)\n",
+     "l=layers.Dense(64,activation=\"relu\",name=\"in\")(l)\n",
+     "l=layers.Dense(16,activation=\"relu\",name=\"in1\")(l)\n",
+     "l=layers.Dropout(0.2)(l)\n",
+     "l=layers.Dense(3,activation=\"softmax\",name=\"output\")(l) #[0.2,0.3,0.5] = 1\n",
+     "Model=tf.keras.Model(inputs=textinput,outputs=[l])\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 30,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "Model.compile(\"adam\",loss=\"sparse_categorical_crossentropy\",metrics=[\"accuracy\"])\n",
+     "Model.summary()"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 31,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "hist=Model.fit(X_train,y_train,epochs=30,batch_size=80)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "Model.evaluate(X_test,y_test)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "Model.save(\"./Model_3_Bert_Uncase.h5\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "A=Model.predict([\"you awake from a deep trance, step away from the phone to see your friends & family\"])\n",
+     "a=np.argmax(A)\n",
+     "Classes[a],int(A[0][a]*100)"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "Python 3",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.7.2"
+   },
+   "orig_nbformat": 4
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
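A note on the label-encoding cell above: the nested loop maps each sentiment string to its index in `Classes`, producing 0 for neutral, 1 for positive, and 2 for negative. A dictionary-based version with the same result, shown here only as an illustration with made-up example labels, would be:

```python
import numpy as np

Classes = ['neutral', 'positive', 'negative']
label_index = {name: idx for idx, name in enumerate(Classes)}

# Same mapping as the notebook's loop: 0 => neutral, 1 => positive, 2 => negative.
Y = np.array(['neutral', 'negative', 'positive'])   # example labels, not from the dataset
New_Y = np.array([label_index[y] for y in Y])       # -> array([0, 2, 1])
```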
sentiment-emotion-labelled_Dell_tweets.csv ADDED
The diff for this file is too large to render. See raw diff