Fawazzx committed
Commit 1e10b23 · Parent: 535f433

Upload 17 files

BackPropogation.py ADDED
@@ -0,0 +1,53 @@
+ import numpy as np
+ from tqdm import tqdm
+
+
+ class BackPropogation:
+     def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
+         self.bias = 0
+         self.learning_rate = learning_rate
+         self.max_epochs = epochs
+         self.activation_function = activation_function
+
+     def activate(self, x):
+         if self.activation_function == 'step':
+             return 1 if x >= 0 else 0
+         elif self.activation_function == 'sigmoid':
+             return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
+         elif self.activation_function == 'relu':
+             return 1 if max(0, x) >= 0.5 else 0
+
+     def fit(self, X, y):
+         n_features = X.shape[1]
+         self.weights = np.zeros(n_features)
+         for epoch in tqdm(range(self.max_epochs)):
+             for i in range(len(X)):
+                 inputs = X[i]
+                 target = y[i]
+                 weighted_sum = np.dot(inputs, self.weights) + self.bias
+                 prediction = self.activate(weighted_sum)
+
+                 # Calculate the error and update weights and bias.
+                 error = target - prediction
+                 self.weights += self.learning_rate * error * inputs
+                 self.bias += self.learning_rate * error
+
+             print(f"Updated weights after epoch {epoch}: {self.weights}")
+         print("Training Completed")
+
+     def predict(self, X):
+         predictions = []
+         for i in range(len(X)):
+             inputs = X[i]
+             weighted_sum = np.dot(inputs, self.weights) + self.bias
+             prediction = self.activate(weighted_sum)
+             predictions.append(prediction)
+         return predictions
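Despite its name, the class above trains a single unit with a perceptron-style error-correction update rather than backpropagating gradients through hidden layers. A minimal usage sketch, assuming the file is importable as committed (the toy AND-gate data is illustrative only, not part of the commit):

import numpy as np
from BackPropogation import BackPropogation  # module name as committed

# Toy AND-gate data, purely to illustrate the fit/predict API.
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([0, 0, 0, 1])

model = BackPropogation(learning_rate=0.1, epochs=10, activation_function='step')
model.fit(X, y)
print(model.predict(X))  # expected to converge towards [0, 0, 0, 1]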
Perceptron.py ADDED
@@ -0,0 +1,48 @@
+ import numpy as np
+ from tqdm import tqdm
+
+
+ class Perceptron:
+
+     def __init__(self, learning_rate=0.01, epochs=100, activation_function='step'):
+         self.bias = 0
+         self.learning_rate = learning_rate
+         self.max_epochs = epochs
+         self.activation_function = activation_function
+
+     def activate(self, x):
+         if self.activation_function == 'step':
+             return 1 if x >= 0 else 0
+         elif self.activation_function == 'sigmoid':
+             return 1 if (1 / (1 + np.exp(-x))) >= 0.5 else 0
+         elif self.activation_function == 'relu':
+             return 1 if max(0, x) >= 0.5 else 0
+
+     def fit(self, X, y):
+         n_features = X.shape[1]
+         self.weights = np.random.randint(n_features, size=(n_features))
+         for epoch in tqdm(range(self.max_epochs)):
+             for i in range(len(X)):
+                 inputs = X[i]
+                 target = y[i]
+                 weighted_sum = np.dot(inputs, self.weights) + self.bias
+                 prediction = self.activate(weighted_sum)
+         print("Training Completed")
+
+     def predict(self, X):
+         predictions = []
+         for i in range(len(X)):
+             inputs = X[i]
+             weighted_sum = np.dot(inputs, self.weights) + self.bias
+             prediction = self.activate(weighted_sum)
+             predictions.append(prediction)
+         return predictions
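Note that, as committed, Perceptron.fit computes a prediction for each sample but never adjusts self.weights or self.bias, so predict later runs on the random integer initialisation. If a learning step was intended, a hypothetical variant (not part of the commit) could mirror the update already used in BackPropogation.fit:

import numpy as np
from tqdm import tqdm
from Perceptron import Perceptron  # module name as committed

class TrainablePerceptron(Perceptron):
    """Hypothetical subclass that applies the perceptron learning rule in fit."""
    def fit(self, X, y):
        n_features = X.shape[1]
        # Float initialisation, so the in-place update below is well defined.
        self.weights = np.zeros(n_features)
        for _ in tqdm(range(self.max_epochs)):
            for inputs, target in zip(X, y):
                prediction = self.activate(np.dot(inputs, self.weights) + self.bias)
                error = target - prediction
                self.weights += self.learning_rate * error * inputs
                self.bias += self.learning_rate * error
        print("Training Completed")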
README.md CHANGED
@@ -1,10 +1,10 @@
  ---
- title: Multi Model Classifier
- emoji: 🏢
- colorFrom: gray
- colorTo: red
+ title: Nn Models
+ emoji: 🌍
+ colorFrom: pink
+ colorTo: green
  sdk: streamlit
- sdk_version: 1.29.0
+ sdk_version: 1.28.2
  app_file: app.py
  pinned: false
  ---
app.py ADDED
@@ -0,0 +1,159 @@
+ import streamlit as st
+ import pickle
+ import tensorflow as tf
+ from tensorflow.keras.preprocessing import sequence
+ from tensorflow.keras.saving import load_model
+ import numpy as np
+ import cv2
+ from PIL import Image
+
+
+ st.title('Deep Learning Classifier App')
+ task = st.selectbox('Select Task', ['Choose one', 'Sentiment Classification', 'Tumor Detection'])
+
+ if task == 'Tumor Detection':
+     st.subheader('Tumor Detection with CNN')
+     # CNN
+     cnn_model = load_model("cnn_model.h5")
+
+     img = st.file_uploader('Upload image', type=['jpeg', 'jpg', 'png'])
+
+     def cnn_make_prediction(img, model):
+         img = Image.open(img)
+         img = img.resize((128, 128))
+         img = np.array(img)
+         input_img = np.expand_dims(img, axis=0)
+         res = model.predict(input_img)
+         if res:
+             return "Tumor Detected"
+         else:
+             return "No Tumor Detected"
+
+     if img is not None:
+         st.image(img, caption="Image preview")
+         if st.button('Submit'):
+             pred = cnn_make_prediction(img, cnn_model)
+             st.write(pred)
+
+
+ if task == 'Sentiment Classification':
+     arcs = ['Perceptron', 'Backpropagation', 'DNN', 'RNN', 'LSTM']
+     arc = st.radio('Pick one:', arcs, horizontal=True)
+
+     if arc == arcs[0]:
+         # Perceptron
+         with open("ppn_model.pkl", 'rb') as file:
+             perceptron = pickle.load(file)
+         with open("ppn_tokeniser.pkl", 'rb') as file:
+             ppn_tokeniser = pickle.load(file)
+
+         st.subheader('Movie Review Classification using Perceptron')
+         inp = st.text_area('Enter message')
+
+         def ppn_make_predictions(inp, model):
+             encoded_inp = ppn_tokeniser.texts_to_sequences([inp])
+             padded_inp = sequence.pad_sequences(encoded_inp, maxlen=500)
+             res = model.predict(padded_inp)
+             if res:
+                 return "Negative"
+             else:
+                 return "Positive"
+
+         if st.button('Check'):
+             pred = ppn_make_predictions([inp], perceptron)
+             st.write(pred)
+
+     elif arc == arcs[1]:
+         # BackPropogation
+         with open("bp_model.pkl", 'rb') as file:
+             backprop = pickle.load(file)
+         with open("bp_tokeniser.pkl", 'rb') as file:
+             bp_tokeniser = pickle.load(file)
+
+         st.subheader('Movie Review Classification using Backpropagation')
+         inp = st.text_area('Enter message')
+
+         def bp_make_predictions(inp, model):
+             encoded_inp = bp_tokeniser.texts_to_sequences([inp])
+             padded_inp = sequence.pad_sequences(encoded_inp, maxlen=500)
+             res = model.predict(padded_inp)
+             if res:
+                 return "Negative"
+             else:
+                 return "Positive"
+
+         if st.button('Check'):
+             pred = bp_make_predictions([inp], backprop)
+             st.write(pred)
+
+     elif arc == arcs[2]:
+         # DNN
+         dnn_model = load_model("dnn_model.h5")
+         with open("dnn_tokeniser.pkl", 'rb') as file:
+             dnn_tokeniser = pickle.load(file)
+
+         st.subheader('SMS Spam Classification using DNN')
+         inp = st.text_area('Enter message')
+
+         def dnn_make_predictions(inp, model):
+             encoded_inp = dnn_tokeniser.texts_to_sequences(inp)
+             padded_inp = sequence.pad_sequences(encoded_inp, maxlen=10, padding='post')
+             res = (model.predict(padded_inp) > 0.5).astype("int32")
+             if res:
+                 return "Spam"
+             else:
+                 return "Ham"
+
+         if st.button('Check'):
+             pred = dnn_make_predictions([inp], dnn_model)
+             st.write(pred)
+
+     elif arc == arcs[3]:
+         # RNN
+         rnn_model = load_model("rnn_model.h5")
+
+         with open("rnn_tokeniser.pkl", 'rb') as file:
+             rnn_tokeniser = pickle.load(file)
+
+         st.subheader('SMS Spam Classification using RNN')
+         inp = st.text_area('Enter message')
+
+         def rnn_make_predictions(inp, model):
+             encoded_inp = rnn_tokeniser.texts_to_sequences(inp)
+             padded_inp = sequence.pad_sequences(encoded_inp, maxlen=10, padding='post')
+             res = (model.predict(padded_inp) > 0.5).astype("int32")
+             if res:
+                 return "Spam"
+             else:
+                 return "Ham"
+
+         if st.button('Check'):
+             pred = rnn_make_predictions([inp], rnn_model)
+             st.write(pred)
+
+     elif arc == arcs[4]:
+         # LSTM
+         lstm_model = load_model("lstm_model.h5")
+
+         with open("lstm_tokeniser.pkl", 'rb') as file:
+             lstm_tokeniser = pickle.load(file)
+
+         st.subheader('Movie Review Classification using LSTM')
+         inp = st.text_area('Enter message')
+
+         def lstm_make_predictions(inp, model):
+             inp = lstm_tokeniser.texts_to_sequences(inp)
+             inp = sequence.pad_sequences(inp, maxlen=500)
+             res = (model.predict(inp) > 0.5).astype("int32")
+             if res:
+                 return "Negative"
+             else:
+                 return "Positive"
+
+         if st.button('Check'):
+             pred = lstm_make_predictions([inp], lstm_model)
+             st.write(pred)
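The DNN, RNN and LSTM branches above repeat the same tokenise → pad → predict → threshold pattern. A hedged refactoring sketch for those branches (the helper name and defaults are illustrative, not part of the commit, and it assumes each model ends in a single sigmoid output):

from tensorflow.keras.preprocessing import sequence

# Hypothetical helper (not in the commit): covers the DNN, RNN and LSTM
# branches, assuming a single sigmoid output per model.
def keras_text_prediction(text, tokeniser, model, maxlen, padding='pre',
                          labels=('Ham', 'Spam')):
    encoded = tokeniser.texts_to_sequences([text])
    padded = sequence.pad_sequences(encoded, maxlen=maxlen, padding=padding)
    prob = float(model.predict(padded)[0][0])
    return labels[1] if prob > 0.5 else labels[0]

The DNN branch would call it as keras_text_prediction(inp, dnn_tokeniser, dnn_model, maxlen=10, padding='post'); the LSTM branch would use maxlen=500 and labels=('Positive', 'Negative'). Once the model and tokeniser files are present, the app itself runs with the usual streamlit run app.py.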
bp_model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2898ac4c9ef15f477f4bd8ac49b1ae1357b92e6d8867b14c0b05ec7a4ea45149
+ size 4300
bp_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09ac51e8d69f97cb4b0756b2be644b509c9798371fbf469b378369548b5d0e21
+ size 4992453
cnn_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49102a82d743e9cfbe13544a601508061b135b1d9f8230c55782143da19a7671
+ size 391811360
dnn_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a3bfb8224c359673dbf1382dc171206736a5c3d4ec4b42ee1cb84d5c19a53297
+ size 80904
dnn_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ad767898df8dc404c911583f1b9b061c19e1354c8998681031e6eb76d895857
+ size 287385
lstm_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:84864b048b49a5984b591540dbc8cbbb28e6d76a00a9d270a888823d665fba3b
+ size 41224696
lstm_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f8c27a42690440c5fca488a2ac82911e196f013d1b54b97d3468805722183ca
+ size 4534143
ppn_model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78829f6c48f5b072f359409219f41b32aeee008afe380374926c7963dc374cff
+ size 2267
ppn_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f3b29ee785f9e52d1e1fa10c4d2afa83be81276175e0b5923801aba6a9514db
+ size 4848716
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ tensorflow==2.13.0
+ numpy==1.23.5
+ opencv-python==4.8.1.78
+ Pillow==9.3.0
+ streamlit==1.26.0
+ requires.io
+
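Two observations on this manifest: tqdm, which BackPropogation.py and Perceptron.py import, is not pinned here, and requires.io looks like the name of a dependency-monitoring service rather than a pip-installable package. The pinned streamlit==1.26.0 also differs from the sdk_version: 1.28.2 declared in the updated README; on Spaces, the README value is presumably the one that takes effect.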
rnn_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd08d01763fe3832cf85a0eaadedc1358ee07883a728c191d031457eba78a80c
+ size 2243672
rnn_tokeniser.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:21d2caaaed5daacda66fb5fd4da3e7f07401efac2973a4c99491d17a35a67ee3
+ size 287385