Rajksv committed
Commit
5cf220c
1 Parent(s): 0da64a7

Upload 5 files

app.py ADDED
@@ -0,0 +1,51 @@
+ import streamlit as st
+ import tensorflow as tf
+ import tensorflow_hub as hub
+
+
+
+
+ new_model = tf.keras.models.load_model("best_model.h5", custom_objects={"KerasLayer": hub.KerasLayer})
+
+
+
+
+
+ def welcome():
+     return "Welcome to my app"
+
+
+ def main():
+     st.title("Financial News Sentiment Analysis App")
+     st.write(
+         "This app will tell you if the given news is Positive or Negative by using Natural Language Processing")
+     html_temp = """
+     <div style="background-color:tomato;padding:10px">
+     <h2 style="color:white;text-align:center;">Financial News Sentiment Analysis</h2>
+     </div>"""
+
+     st.markdown(html_temp, unsafe_allow_html=True)
+
+     text = st.text_input("Enter your Financial News")
+
+
+     if st.button("Predict"):
+         pred_prob = new_model.predict([text])
+         predict = tf.squeeze(tf.round(pred_prob)).numpy()
+         st.subheader("AI thinks that ...")
+
+         if predict > 0:
+
+             st.success(
+                 f"It's a Positive News. You can make your investment decision accordingly. Confidence Level is {round(float(pred_prob[0][0]) * 100, 3)}%", icon="✅")
+         else:
+             st.warning(
+                 f"It's a Negative News. Think twice before you take any investment decision. Confidence Level is {round((1 - float(pred_prob[0][0])) * 100, 3)}%", icon="⚠️")
+
+     if st.button("About"):
+
+         st.text("Built with Streamlit")
+
+
+ if __name__ == '__main__':
+     main()
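
Since Streamlit re-executes app.py on every widget interaction, the module-level load_model call above reloads the ~598 MB model more often than needed. A minimal sketch of a cached loader (assuming st.cache_resource, which exists in newer Streamlit releases; on the pinned streamlit 1.15.1, @st.experimental_singleton plays the same role):

import streamlit as st
import tensorflow as tf
import tensorflow_hub as hub

@st.cache_resource  # on streamlit 1.15.x, use @st.experimental_singleton instead
def get_model(path="best_model.h5"):
    # Load the model once per server process and reuse it across reruns.
    return tf.keras.models.load_model(path, custom_objects={"KerasLayer": hub.KerasLayer})

new_model = get_model()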
best_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59f1467603bec5ced69d495d4d91cc6dd8c2cc15b8c8948aa6a6acd054b09812
+ size 598267328
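
Note that best_model.h5 is committed as a Git LFS pointer, so the ~598 MB weights file is not stored in the Git history itself; after cloning, it has to be fetched (for example with git lfs pull) before app.py can load it.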
financial_news_sentiment_analysis.ipynb ADDED
@@ -0,0 +1,1012 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "RF9ztNQ_HH1S"
+ },
+ "source": [
+ "# Financial News Sentiment Analysis\n",
+ "\n",
+ "\n",
+ "> About the Dataset\n",
+ "\n",
+ "A compiled dataset for sentiment analysis of Indian financial news.\n",
+ "\n",
+ "Date range: Jan 1, 2017 to April 15, 2021\n",
+ "\n",
+ "News sources:\n",
+ "Indian sources: Economic Times, Money Control, Livemint, Business Today, Financial Express\n",
+ "Foreign sources: NY Times, WSJ, Washington Post\n",
+ "\n",
+ "Keywords:\n",
+ "Indian sources: \"economy\" or \"markets\" or \"inflation\"\n",
+ "Foreign sources: \"Indian economy\" OR \"India economy\" OR \"Indian businesses\" OR \"Indian business\"\n",
+ "\n",
+ "Sentiment analysis: performed using the flair NLP model. All confidence scores for NEGATIVE sentiment datapoints have been multiplied by -1 from the original flair output. Basic cleanup was done to remove repeated headlines, and all headlines shorter than 30 characters were ignored.\n",
+ "\n",
+ "Acknowledgements: the GDELT Headline Scrape script from Prof. Ken Blake (https://drkblake.com/gdeltheadlinescrape/) was used to generate the news headlines dataset.\n",
+ "\n",
+ "Motivation: the intent of generating this data was to compile recent years' financial news headlines for India and perform sentiment analysis on them.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Vjz5cRWbG1dX"
+ },
+ "source": [
+ "Connecting Google Colab to Kaggle to fetch the dataset directly in Colab (the next cell sketches this step)"
+ ]
+ },
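+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# A minimal sketch of the Kaggle hookup described above; assumes a kaggle.json\n",
+ "# API token is at hand, and the dataset slug below is a placeholder (the\n",
+ "# actual slug is not recorded in the notebook).\n",
+ "!pip install -q kaggle\n",
+ "from google.colab import files\n",
+ "files.upload()  # upload kaggle.json\n",
+ "!mkdir -p ~/.kaggle && cp kaggle.json ~/.kaggle/ && chmod 600 ~/.kaggle/kaggle.json\n",
+ "!kaggle datasets download -d <owner>/<dataset-slug>  # placeholder slug"
+ ]
+ },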
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "Z40L2dMJIKDV"
+ },
+ "source": [
+ "Downloading the helper_functions.py script by mrdbourke, which contains custom utility functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "14lcGF09ycxN",
+ "outputId": "90e302ac-851b-4030-d125-25cb66d9bf1c"
+ },
+ "outputs": [],
+ "source": [
+ "! wget https://raw.githubusercontent.com/mrdbourke/tensorflow-deep-learning/main/extras/helper_functions.py"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "cLQa4EnfIcoZ"
+ },
+ "source": [
+ "Importing the required functions from helper_functions"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 72,
+ "metadata": {
+ "id": "PPGYFIrMyhdW"
+ },
+ "outputs": [],
+ "source": [
+ "from helper_functions import unzip_data, plot_loss_curves, make_confusion_matrix"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "AHhaRpRiItVb"
+ },
+ "source": [
+ "Importing the required libraries"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 73,
+ "metadata": {
+ "id": "M695fmj2yxeY"
+ },
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "import numpy as np\n",
+ "import tensorflow as tf\n",
+ "import tensorflow_hub as hub\n",
+ "from tensorflow.keras import layers"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "KzUslyaWI4y9"
+ },
+ "source": [
+ "# Part 1: Data Preprocessing"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "uJxrzwbbJFEP"
+ },
+ "source": [
+ "Importing the dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 74,
+ "metadata": {
+ "id": "y_Xhn09i1x4k"
+ },
+ "outputs": [],
+ "source": [
+ "df = pd.read_csv(\"News_sentiment_Jan2017_to_Apr2021.csv\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 75,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 206
+ },
+ "id": "p8mdpAH52vOO",
+ "outputId": "2c369bd7-2f4a-4420-9dfb-8bf695491435"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>Date</th>\n",
+ " <th>Title</th>\n",
+ " <th>URL</th>\n",
+ " <th>sentiment</th>\n",
+ " <th>confidence</th>\n",
+ " <th>Unnamed: 5</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>Eliminating shadow economy to have positive im...</td>\n",
+ " <td>http://economictimes.indiatimes.com/news/econo...</td>\n",
+ " <td>POSITIVE</td>\n",
+ " <td>0.996185</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>Two Chinese companies hit roadblock with India...</td>\n",
+ " <td>http://economictimes.indiatimes.com/news/econo...</td>\n",
+ " <td>NEGATIVE</td>\n",
+ " <td>-0.955493</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>SoftBank India Vision gets new $100</td>\n",
+ " <td>http://economictimes.indiatimes.com/small-biz/...</td>\n",
+ " <td>POSITIVE</td>\n",
+ " <td>0.595612</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>3</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>Nissan halts joint development of luxury cars ...</td>\n",
+ " <td>http://economictimes.indiatimes.com/news/inter...</td>\n",
+ " <td>NEGATIVE</td>\n",
+ " <td>-0.996672</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>Despite challenges Rajasthan continues to prog...</td>\n",
+ " <td>http://economictimes.indiatimes.com/news/polit...</td>\n",
+ " <td>POSITIVE</td>\n",
+ " <td>0.997388</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " Date Title \\\n",
+ "0 05/01/17 Eliminating shadow economy to have positive im... \n",
+ "1 05/01/17 Two Chinese companies hit roadblock with India... \n",
+ "2 05/01/17 SoftBank India Vision gets new $100 \n",
+ "3 05/01/17 Nissan halts joint development of luxury cars ... \n",
+ "4 05/01/17 Despite challenges Rajasthan continues to prog... \n",
+ "\n",
+ " URL sentiment confidence \\\n",
+ "0 http://economictimes.indiatimes.com/news/econo... POSITIVE 0.996185 \n",
+ "1 http://economictimes.indiatimes.com/news/econo... NEGATIVE -0.955493 \n",
+ "2 http://economictimes.indiatimes.com/small-biz/... POSITIVE 0.595612 \n",
+ "3 http://economictimes.indiatimes.com/news/inter... NEGATIVE -0.996672 \n",
+ "4 http://economictimes.indiatimes.com/news/polit... POSITIVE 0.997388 \n",
+ "\n",
+ " Unnamed: 5 \n",
+ "0 NaN \n",
+ "1 NaN \n",
+ "2 NaN \n",
+ "3 NaN \n",
+ "4 NaN "
+ ]
+ },
+ "execution_count": 75,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "nIH1zFlOJTj_"
+ },
+ "source": [
+ "Label encoding the sentiment column\n",
+ "\n",
+ "\n",
+ "> Details\n",
+ "\n",
+ "Since this is binary classification, we need to convert the sentiment column's class names (\"POSITIVE\", \"NEGATIVE\") to binary values (0, 1): the labels must be numeric before the data is fed to the neural network.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 76,
+ "metadata": {
+ "id": "OwTPfIzw22rX"
+ },
+ "outputs": [],
+ "source": [
+ "from sklearn.preprocessing import LabelEncoder\n",
+ "le = LabelEncoder()\n",
+ "df['sentiment'] = le.fit_transform(df['sentiment'])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 77,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 206
+ },
+ "id": "Fth-JdqV3P40",
+ "outputId": "d4df2032-3e3a-4a60-a092-6c54849a08f4"
+ },
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "<div>\n",
+ "<style scoped>\n",
+ " .dataframe tbody tr th:only-of-type {\n",
+ " vertical-align: middle;\n",
+ " }\n",
+ "\n",
+ " .dataframe tbody tr th {\n",
+ " vertical-align: top;\n",
+ " }\n",
+ "\n",
+ " .dataframe thead th {\n",
+ " text-align: right;\n",
+ " }\n",
+ "</style>\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ " <thead>\n",
+ " <tr style=\"text-align: right;\">\n",
+ " <th></th>\n",
+ " <th>Date</th>\n",
+ " <th>Title</th>\n",
+ " <th>URL</th>\n",
+ " <th>sentiment</th>\n",
+ " <th>confidence</th>\n",
+ " <th>Unnamed: 5</th>\n",
+ " </tr>\n",
+ " </thead>\n",
+ " <tbody>\n",
+ " <tr>\n",
+ " <th>0</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>Eliminating shadow economy to have positive im...</td>\n",
+ " <td>http://economictimes.indiatimes.com/news/econo...</td>\n",
+ " <td>1</td>\n",
+ " <td>0.996185</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>1</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>Two Chinese companies hit roadblock with India...</td>\n",
+ " <td>http://economictimes.indiatimes.com/news/econo...</td>\n",
+ " <td>0</td>\n",
+ " <td>-0.955493</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>2</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>SoftBank India Vision gets new $100</td>\n",
+ " <td>http://economictimes.indiatimes.com/small-biz/...</td>\n",
+ " <td>1</td>\n",
+ " <td>0.595612</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>3</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>Nissan halts joint development of luxury cars ...</td>\n",
+ " <td>http://economictimes.indiatimes.com/news/inter...</td>\n",
+ " <td>0</td>\n",
+ " <td>-0.996672</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " <tr>\n",
+ " <th>4</th>\n",
+ " <td>05/01/17</td>\n",
+ " <td>Despite challenges Rajasthan continues to prog...</td>\n",
+ " <td>http://economictimes.indiatimes.com/news/polit...</td>\n",
+ " <td>1</td>\n",
+ " <td>0.997388</td>\n",
+ " <td>NaN</td>\n",
+ " </tr>\n",
+ " </tbody>\n",
+ "</table>\n",
+ "</div>"
+ ],
+ "text/plain": [
+ " Date Title \\\n",
+ "0 05/01/17 Eliminating shadow economy to have positive im... \n",
+ "1 05/01/17 Two Chinese companies hit roadblock with India... \n",
+ "2 05/01/17 SoftBank India Vision gets new $100 \n",
+ "3 05/01/17 Nissan halts joint development of luxury cars ... \n",
+ "4 05/01/17 Despite challenges Rajasthan continues to prog... \n",
+ "\n",
+ " URL sentiment confidence \\\n",
+ "0 http://economictimes.indiatimes.com/news/econo... 1 0.996185 \n",
+ "1 http://economictimes.indiatimes.com/news/econo... 0 -0.955493 \n",
+ "2 http://economictimes.indiatimes.com/small-biz/... 1 0.595612 \n",
+ "3 http://economictimes.indiatimes.com/news/inter... 0 -0.996672 \n",
+ "4 http://economictimes.indiatimes.com/news/polit... 1 0.997388 \n",
+ "\n",
+ " Unnamed: 5 \n",
+ "0 NaN \n",
+ "1 NaN \n",
+ "2 NaN \n",
+ "3 NaN \n",
+ "4 NaN "
+ ]
+ },
+ "execution_count": 77,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "df.head()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "FzdHcZ3aLMqQ"
+ },
+ "source": [
+ "Splitting the data into train_sentences, val_sentences, train_labels, val_labels"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 78,
+ "metadata": {
+ "id": "WjimSV9u3t35"
+ },
+ "outputs": [],
+ "source": [
+ "from sklearn.model_selection import train_test_split\n",
+ "train_sentences, val_sentences, train_labels, val_labels = train_test_split(df['Title'].to_numpy(),\n",
+ " df['sentiment'].to_numpy(),\n",
+ " test_size = 0.2,\n",
+ " random_state = 42)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "C9UPIx2bLWky"
+ },
+ "source": [
+ "Creating datasets (as fast as possible)\n",
+ "\n",
+ "\n",
+ "\n",
+ "> tf.data: build TensorFlow input pipelines and get better performance with the tf.data API\n",
+ "\n",
+ "\n",
+ "We'll ensure TensorFlow loads our data onto the GPU as fast as possible, in turn leading to faster training times.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 79,
+ "metadata": {
+ "id": "ZkJIEJ7G8A_6"
+ },
+ "outputs": [],
+ "source": [
+ "train_dataset = tf.data.Dataset.from_tensor_slices((train_sentences, train_labels))\n",
+ "valid_dataset = tf.data.Dataset.from_tensor_slices((val_sentences, val_labels))\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 80,
+ "metadata": {
+ "id": "kRa7yT738u5R"
+ },
+ "outputs": [],
+ "source": [
+ "train_dataset = train_dataset.batch(32).prefetch(tf.data.AUTOTUNE)\n",
+ "valid_dataset = valid_dataset.batch(32).prefetch(tf.data.AUTOTUNE)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "WUnNiVmfMp05"
+ },
+ "source": [
+ "# Part 2: Embedding the Inputs (sentences) using Transfer Learning\n",
+ "\n",
+ "\n",
+ "\n",
+ "> Converting text into numbers\n",
+ "\n",
+ "You can build your own tokenizer and embedding layer, but for this problem I'm going to use pre-trained embeddings, i.e. the Universal Sentence Encoder.\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ELexOyhdOTiz"
+ },
+ "source": [
+ "Loading the pretrained model from TensorFlow Hub into Colab"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 81,
+ "metadata": {
+ "id": "38XlQgNi4k9S"
+ },
+ "outputs": [],
+ "source": [
+ "embed = hub.load('https://tfhub.dev/google/universal-sentence-encoder-large/5')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "rfqwvk0iOZXl"
+ },
+ "source": [
+ "Creating the sentence encoder layer that we will add to the neural network"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 82,
+ "metadata": {
+ "id": "ZrC3uZDj4v8F"
+ },
+ "outputs": [],
+ "source": [
+ "sentence_encoder_layer = hub.KerasLayer(\"https://tfhub.dev/google/universal-sentence-encoder-large/5\", input_shape = [], dtype = \"string\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "X_zHSSw_OlVn"
+ },
+ "source": [
+ "# Part 3: Build the Deep Learning Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "yiIOOVtUOwAA"
+ },
+ "source": [
+ "Building the LSTM model using the Functional API"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 83,
+ "metadata": {
+ "id": "57otrTCz58va"
+ },
+ "outputs": [],
+ "source": [
+ "inputs = layers.Input(shape = [], dtype = \"string\", name = \"input_layer\")\n",
+ "x = sentence_encoder_layer(inputs)\n",
+ "x = tf.expand_dims(x, axis = 1)\n",
+ "x = layers.Bidirectional(layers.LSTM(72, return_sequences = True))(x)\n",
+ "x = layers.Dropout(0.5)(x)\n",
+ "x = layers.Bidirectional(layers.LSTM(72, return_sequences = True))(x)\n",
+ "x = layers.Dropout(0.5)(x)\n",
+ "x = layers.Bidirectional(layers.LSTM(72))(x)\n",
+ "x = layers.Dropout(0.5)(x)\n",
+ "outputs = layers.Dense(1, activation = 'sigmoid', name = 'output_layer')(x)\n",
+ "model = tf.keras.Model(inputs, outputs, name = \"model_lstm\")\n",
+ "\n",
+ "model.compile(loss = \"binary_crossentropy\", optimizer = 'adam', metrics = ['accuracy'])\n",
+ "\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 84,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Num GPUs Available: 0\n",
+ "[name: \"/device:CPU:0\"\n",
+ "device_type: \"CPU\"\n",
+ "memory_limit: 268435456\n",
+ "locality {\n",
+ "}\n",
+ "incarnation: 10970432882806203582\n",
+ "xla_global_id: -1\n",
+ "]\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "''"
+ ]
+ },
+ "execution_count": 84,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "print(\"Num GPUs Available: \", len(tf.config.list_physical_devices('GPU')))\n",
+ "\n",
+ "from tensorflow.python.client import device_lib\n",
+ "print(device_lib.list_local_devices())\n",
+ "tf.test.gpu_device_name()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "2lq6A-raPKhf"
+ },
+ "source": [
+ "Fitting the model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 85,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "x23BVYmg7bll",
+ "outputId": "efaaab84-7e66-4ea7-b5d6-8e1634deee5d"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Epoch 1/10\n",
+ "5013/5013 [==============================] - 1086s 206ms/step - loss: 0.4938 - accuracy: 0.7601 - val_loss: 0.4691 - val_accuracy: 0.7740\n",
+ "Epoch 2/10\n",
+ "5013/5013 [==============================] - 1018s 203ms/step - loss: 0.4718 - accuracy: 0.7737 - val_loss: 0.4594 - val_accuracy: 0.7809\n",
+ "Epoch 3/10\n",
+ "4408/5013 [=========================>....] - ETA: 1:38 - loss: 0.4628 - accuracy: 0.7786"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "IOPub message rate exceeded.\n",
+ "The notebook server will temporarily stop sending output\n",
+ "to the client in order to avoid crashing it.\n",
+ "To change this limit, set the config variable\n",
+ "`--NotebookApp.iopub_msg_rate_limit`.\n",
+ "\n",
+ "Current values:\n",
+ "NotebookApp.iopub_msg_rate_limit=1000.0 (msgs/sec)\n",
+ "NotebookApp.rate_limit_window=3.0 (secs)\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "history = model.fit(train_dataset, validation_data = valid_dataset, epochs = 10)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "3Lq9HPUTPQuW"
+ },
+ "source": [
+ "Plotting the loss and accuracy curves"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 86,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 573
+ },
+ "id": "rmyKmJABAJ24",
+ "outputId": "3eac8fc7-169d-48d8-9f6d-c0bbcac9f6d0"
+ },
+ "outputs": [],
+ "source": [
+ "plot_loss_curves(history)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "utVMQsQUPV7w"
+ },
+ "source": [
+ "# Part 4: Evaluating the Trained Model"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "SOES1ympPr5E"
+ },
+ "source": [
+ "Testing the model on the validation sentences"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 87,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Z0LwjwVJC5-2",
+ "outputId": "0c5b5c45-efbf-4fa7-8efa-41cfe315c82c"
+ },
+ "outputs": [],
+ "source": [
+ "y_probs = model.predict(val_sentences)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "U69-IYQ7P86H"
+ },
+ "source": [
+ "Converting the probabilities in y_probs to class labels"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 88,
+ "metadata": {
+ "id": "Nktk3_PZPx39"
+ },
+ "outputs": [],
+ "source": [
+ "y_preds = tf.round(y_probs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "C4r1xJVQQG-x"
+ },
+ "source": [
+ "Comparing the actual validation labels with the model's predicted labels"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 89,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "Ech5x8hzDRTa",
+ "outputId": "d71eb535-3681-498e-b7a9-7a4499d13d6c"
+ },
+ "outputs": [],
+ "source": [
+ "y_preds[:10]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 90,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "nwFRQS74DTrS",
+ "outputId": "936617ce-05ee-4ffa-b382-f604d9ab95bf"
+ },
+ "outputs": [],
+ "source": [
+ "val_labels[:10]"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "dRUknCd7QTG7"
+ },
+ "source": [
+ "Building the confusion matrix to check model performance"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 91,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 574
+ },
+ "id": "ZIDU6VswDlX-",
+ "outputId": "9d48cb6b-4219-4130-a513-f60140fb03b1"
+ },
+ "outputs": [],
+ "source": [
+ "make_confusion_matrix(val_labels, y_preds)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "wesRXLydQgEt"
+ },
+ "source": [
+ "Saving the model for deployment"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 92,
+ "metadata": {
+ "id": "JDuCL1YWIZnU"
+ },
+ "outputs": [],
+ "source": [
+ "model.save('best_model.h5')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "yQeGbGfUQoFZ"
+ },
+ "source": [
+ "Loading the model to check whether all weights were saved"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 93,
+ "metadata": {
+ "id": "Y9fd-UjsIHR6"
+ },
+ "outputs": [],
+ "source": [
+ "model = tf.keras.models.load_model(\"best_model.h5\", custom_objects={\"KerasLayer\": hub.KerasLayer})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 94,
+ "metadata": {
+ "id": "6pRODldTR_SW"
+ },
+ "outputs": [],
+ "source": [
+ "model.evaluate(valid_dataset)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "owboakteQvZZ"
+ },
+ "source": [
+ "# Part 5: Realtime Testing of the Trained Model before Deployment"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "GeDTbZUpRNyO"
+ },
+ "source": [
+ "A sample news headline"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 95,
+ "metadata": {
+ "id": "uwyrKEXmdgp-"
+ },
+ "outputs": [],
+ "source": [
+ "custom = \"Student loan forgiveness has scammers ‘on the move,’ warns FTC\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 96,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "custom = \"Sobana is annoying\""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "nxt09v4cRqHU"
+ },
+ "source": [
+ "Creating a function to predict whether it is positive or negative news"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 97,
+ "metadata": {
+ "id": "-DwxUw33-tHw"
+ },
+ "outputs": [],
+ "source": [
+ "def predict_on_sentence(model, sentence):\n",
+ " \"\"\"\n",
+ " Uses model to make a prediction on sentence.\n",
+ "\n",
+ " Returns the sentence, the predicted label and the prediction probability.\n",
+ " \"\"\"\n",
+ " pred_prob = model.predict([sentence])\n",
+ " pred_label = tf.squeeze(tf.round(pred_prob)).numpy()\n",
+ " print(f\"Pred: {pred_label}\", \"(It's a Positive News)\" if pred_label > 0 else \"(It's a Negative News)\", f\"Prob: {pred_prob[0][0]}\")\n",
+ " print(f\"Text:\\n{sentence}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "WHeWWqLeR3T2"
+ },
+ "source": [
+ "Results"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 98,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "hLt_5C8B_1Ek",
+ "outputId": "7296e429-2edc-4ee7-e8f0-f2976ab1fdf7"
+ },
+ "outputs": [],
+ "source": [
+ "predict_on_sentence(model = model, sentence=custom)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.10"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+ }
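
As a complement to the notebook's confusion matrix, per-class precision and recall can be read off scikit-learn's classification_report; a minimal sketch, assuming the notebook's val_labels and y_preds are in scope:

from sklearn.metrics import classification_report

# y_preds holds rounded sigmoid outputs as a float tensor; cast to int class ids.
print(classification_report(val_labels, y_preds.numpy().astype(int)))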
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ numpy==1.23.5
+ streamlit==1.15.1
+ tensorflow_cpu==2.8.0
+ tensorflow_hub==0.12.0  # required by app.py (import tensorflow_hub); version assumed compatible with tensorflow 2.8
+ transformers==4.27.2
+
runtime.txt ADDED
@@ -0,0 +1 @@
+ python-3.9.15