kysmet committed on
Commit 87e340a
• 1 Parent(s): 508fb3b
README.md DELETED
@@ -1,13 +0,0 @@
- ---
- title: Entex
- emoji: 🚀
- colorFrom: blue
- colorTo: purple
- sdk: gradio
- sdk_version: 3.0.12
- app_file: app.py
- pinned: false
- license: other
- ---
- 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -1,16 +1,70 @@
 import gradio as gr
 
- api = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
- 
- def complete_with_gpt(text):
-     # Use the last 50 characters of the text as context
-     return text[:-50] + api(text[-50:])
- 
- with gr.Blocks() as demo:
-     textbox = gr.Textbox(placeholder="Type here and press enter...", lines=4)
-     btn = gr.Button("Generate")
- 
-     btn.click(complete_with_gpt, textbox, textbox)
- 
- if __name__ == "__main__":
-     demo.launch()
+ from keras.preprocessing.image import img_to_array
+ import imutils
+ import cv2
+ from keras.models import load_model
+ import numpy as np
+ 
+ # parameters for loading data and images
+ detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
+ emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5'
+ 
+ # loading the face detector and the emotion classifier
+ face_detection = cv2.CascadeClassifier(detection_model_path)
+ emotion_classifier = load_model(emotion_model_path, compile=False)
+ EMOTIONS = ["angry", "disgusted", "scared", "happy", "sad", "surprised",
+             "neutral"]
+ 
+ 
+ def predict(frame):
+     frame = imutils.resize(frame, width=300)
+     gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
+     faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
+                                             minNeighbors=5, minSize=(30, 30),
+                                             flags=cv2.CASCADE_SCALE_IMAGE)
+ 
+     frameClone = frame.copy()
+     if len(faces) > 0:
+         # keep the largest detection; entries are (x, y, w, h), so area is w * h
+         faces = sorted(faces, reverse=True, key=lambda x: x[2] * x[3])[0]
+         (fX, fY, fW, fH) = faces
+         # Extract the face ROI from the grayscale image, resize it to the
+         # 64x64 input the CNN expects, and prepare it for classification
+         roi = gray[fY:fY + fH, fX:fX + fW]
+         roi = cv2.resize(roi, (64, 64))
+         roi = roi.astype("float") / 255.0
+         roi = img_to_array(roi)
+         roi = np.expand_dims(roi, axis=0)
+ 
+         preds = emotion_classifier.predict(roi)[0]
+         label = EMOTIONS[preds.argmax()]
+     else:
+         return frameClone, "Can't find your face"
+ 
+     probs = {}
+     cv2.putText(frameClone, label, (fX, fY - 10),
+                 cv2.FONT_HERSHEY_DUPLEX, 1, (238, 164, 64), 1)
+     cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
+                   (238, 164, 64), 2)
+ 
+     for emotion, prob in zip(EMOTIONS, preds):
+         probs[emotion] = float(prob)
+ 
+     return frameClone, probs
+ 
+ 
+ inp = gr.inputs.Image(source="webcam", label="Your face")
+ out = [
+     gr.outputs.Image(label="Predicted Emotion"),
+     gr.outputs.Label(num_top_classes=3, label="Top 3 Probabilities")
+ ]
+ title = "Emotion Classification"
+ description = "How well can this model predict your emotions? Take a picture with your webcam, and it will guess if" \
+               " you are: happy, sad, angry, disgusted, scared, surprised, or neutral."
+ thumbnail = "https://raw.githubusercontent.com/gradio-app/hub-emotion-recognition/master/thumbnail.png"
+ 
+ gr.Interface(predict, inp, out, capture_session=True, title=title, thumbnail=thumbnail,
+              description=description).launch(inbrowser=True)
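A quick way to sanity-check the classifier outside Gradio is to run the same ROI pipeline on a still image. This is a minimal editor's sketch, not part of the commit; it assumes the bundled test.png contains a detectable frontal face and that the model paths above are unchanged:

import cv2
import numpy as np
from keras.models import load_model

face_detection = cv2.CascadeClassifier('haarcascade_files/haarcascade_frontalface_default.xml')
emotion_classifier = load_model('models/_mini_XCEPTION.102-0.66.hdf5', compile=False)
EMOTIONS = ["angry", "disgusted", "scared", "happy", "sad", "surprised", "neutral"]

# grayscale the image, take the first detected face, and classify its ROI
gray = cv2.cvtColor(cv2.imread('test.png'), cv2.COLOR_BGR2GRAY)
(fX, fY, fW, fH) = face_detection.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)[0]
roi = cv2.resize(gray[fY:fY + fH, fX:fX + fW], (64, 64)).astype('float') / 255.0
preds = emotion_classifier.predict(roi[np.newaxis, :, :, np.newaxis])[0]
print(dict(zip(EMOTIONS, preds)))  # per-emotion probabilities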
app_text_gen.py ADDED
@@ -0,0 +1,16 @@
+ import gradio as gr
+ 
+ api = gr.Interface.load("huggingface/EleutherAI/gpt-j-6B")
+ 
+ def complete_with_gpt(text):
+     # Use the last 50 characters of the text as context
+     return text[:-50] + api(text[-50:])
+ 
+ with gr.Blocks() as demo:
+     textbox = gr.Textbox(placeholder="Type here and press enter...", lines=4)
+     btn = gr.Button("Generate")
+ 
+     btn.click(complete_with_gpt, textbox, textbox)
+ 
+ if __name__ == "__main__":
+     demo.launch()
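The slicing in complete_with_gpt keeps everything before the 50-character context window and appends the model's continuation of that window. A tiny illustration of just the string logic, with the hosted model call stubbed out by a hypothetical echo function:

def complete_with(text, generate):
    # generate stands in for the GPT-J call
    return text[:-50] + generate(text[-50:])

print(complete_with("x" * 55, lambda ctx: "<" + ctx + ">"))
# five leading "x"s, then the bracketed 50-character context;
# for inputs shorter than 50 characters, text[:-50] is "" and the
# whole input becomes the context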
emotions/Happy.PNG ADDED
emotions/angry.PNG ADDED
emotions/disgust.PNG ADDED
emotions/neutral.PNG ADDED
emotions/sad.PNG ADDED
emotions/scared.PNG ADDED
fer2013/fer2013/readme.txt ADDED
@@ -0,0 +1 @@
+ Put the fer2013.csv file downloaded from the link I have provided here.
haarcascade_files/haarcascade_eye.xml ADDED
The diff for this file is too large to render. See raw diff
haarcascade_files/haarcascade_frontalface_default.xml ADDED
The diff for this file is too large to render. See raw diff
load_and_process.py ADDED
@@ -0,0 +1,30 @@
+ import pandas as pd
+ import cv2
+ import numpy as np
+ 
+ 
+ dataset_path = 'fer2013/fer2013/fer2013.csv'
+ image_size = (48, 48)
+ 
+ def load_fer2013():
+     data = pd.read_csv(dataset_path)
+     pixels = data['pixels'].tolist()
+     width, height = 48, 48
+     faces = []
+     for pixel_sequence in pixels:
+         face = [int(pixel) for pixel in pixel_sequence.split(' ')]
+         face = np.asarray(face).reshape(width, height)
+         face = cv2.resize(face.astype('uint8'), image_size)
+         faces.append(face.astype('float32'))
+     faces = np.asarray(faces)
+     faces = np.expand_dims(faces, -1)
+     # one-hot encode the integer labels; .values replaces the deprecated .as_matrix()
+     emotions = pd.get_dummies(data['emotion']).values
+     return faces, emotions
+ 
+ def preprocess_input(x, v2=True):
+     # scale pixels to [0, 1], and optionally shift to [-1, 1]
+     x = x.astype('float32')
+     x = x / 255.0
+     if v2:
+         x = x - 0.5
+         x = x * 2.0
+     return x
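For reference, each row of fer2013.csv stores an integer emotion label (0-6) plus a 48x48 grayscale face as a space-separated string of 2304 pixel values, which is what the reshape above relies on. Typical use of the two helpers:

from load_and_process import load_fer2013, preprocess_input

faces, emotions = load_fer2013()
faces = preprocess_input(faces)      # scaled to [-1, 1] by default (v2=True)
print(faces.shape, emotions.shape)   # (N, 48, 48, 1) and (N, 7) one-hot labels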
models/__pycache__/cnn.cpython-36.pyc ADDED
Binary file (7.49 kB).
models/_mini_XCEPTION.102-0.66.hdf5 ADDED
Binary file (873 kB).
models/cnn.py ADDED
@@ -0,0 +1,353 @@
+ from keras.layers import Activation, Convolution2D, Dropout, Conv2D
+ from keras.layers import AveragePooling2D, BatchNormalization
+ from keras.layers import GlobalAveragePooling2D
+ from keras.models import Sequential
+ from keras.layers import Flatten
+ from keras.models import Model
+ from keras.layers import Input
+ from keras.layers import MaxPooling2D
+ from keras.layers import SeparableConv2D
+ from keras import layers
+ from keras.regularizers import l2
+ 
+ def simple_CNN(input_shape, num_classes):
+     model = Sequential()
+     model.add(Convolution2D(filters=16, kernel_size=(7, 7), padding='same',
+                             name='image_array', input_shape=input_shape))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=16, kernel_size=(7, 7), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Activation('relu'))
+     model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
+     model.add(Dropout(.5))
+ 
+     model.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Activation('relu'))
+     model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
+     model.add(Dropout(.5))
+ 
+     model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Activation('relu'))
+     model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
+     model.add(Dropout(.5))
+ 
+     model.add(Convolution2D(filters=128, kernel_size=(3, 3), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=128, kernel_size=(3, 3), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Activation('relu'))
+     model.add(AveragePooling2D(pool_size=(2, 2), padding='same'))
+     model.add(Dropout(.5))
+ 
+     model.add(Convolution2D(filters=256, kernel_size=(3, 3), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=num_classes, kernel_size=(3, 3), padding='same'))
+     model.add(GlobalAveragePooling2D())
+     model.add(Activation('softmax', name='predictions'))
+     return model
+ 
+ def simpler_CNN(input_shape, num_classes):
+     model = Sequential()
+     model.add(Convolution2D(filters=16, kernel_size=(5, 5), padding='same',
+                             name='image_array', input_shape=input_shape))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=16, kernel_size=(5, 5),
+                             strides=(2, 2), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Activation('relu'))
+     model.add(Dropout(.25))
+ 
+     model.add(Convolution2D(filters=32, kernel_size=(5, 5), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=32, kernel_size=(5, 5),
+                             strides=(2, 2), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Activation('relu'))
+     model.add(Dropout(.25))
+ 
+     model.add(Convolution2D(filters=64, kernel_size=(3, 3), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=64, kernel_size=(3, 3),
+                             strides=(2, 2), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Activation('relu'))
+     model.add(Dropout(.25))
+ 
+     model.add(Convolution2D(filters=64, kernel_size=(1, 1), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=128, kernel_size=(3, 3),
+                             strides=(2, 2), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Activation('relu'))
+     model.add(Dropout(.25))
+ 
+     model.add(Convolution2D(filters=256, kernel_size=(1, 1), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=128, kernel_size=(3, 3),
+                             strides=(2, 2), padding='same'))
+ 
+     model.add(Convolution2D(filters=256, kernel_size=(1, 1), padding='same'))
+     model.add(BatchNormalization())
+     model.add(Convolution2D(filters=num_classes, kernel_size=(3, 3),
+                             strides=(2, 2), padding='same'))
+ 
+     model.add(Flatten())
+     # model.add(GlobalAveragePooling2D())
+     model.add(Activation('softmax', name='predictions'))
+     return model
+ 
+ def tiny_XCEPTION(input_shape, num_classes, l2_regularization=0.01):
+     regularization = l2(l2_regularization)
+ 
+     # base
+     img_input = Input(input_shape)
+     x = Conv2D(5, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
+                use_bias=False)(img_input)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = Conv2D(5, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
+                use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+ 
+     # module 1
+     residual = Conv2D(8, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(8, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = SeparableConv2D(8, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     # module 2
+     residual = Conv2D(16, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(16, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = SeparableConv2D(16, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     # module 3
+     residual = Conv2D(32, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(32, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = SeparableConv2D(32, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     # module 4
+     residual = Conv2D(64, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(64, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = SeparableConv2D(64, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     x = Conv2D(num_classes, (3, 3),
+                # kernel_regularizer=regularization,
+                padding='same')(x)
+     x = GlobalAveragePooling2D()(x)
+     output = Activation('softmax', name='predictions')(x)
+ 
+     model = Model(img_input, output)
+     return model
+ 
+ 
+ def mini_XCEPTION(input_shape, num_classes, l2_regularization=0.01):
+     regularization = l2(l2_regularization)
+ 
+     # base
+     img_input = Input(input_shape)
+     x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
+                use_bias=False)(img_input)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = Conv2D(8, (3, 3), strides=(1, 1), kernel_regularizer=regularization,
+                use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+ 
+     # module 1
+     residual = Conv2D(16, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(16, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = SeparableConv2D(16, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     # module 2
+     residual = Conv2D(32, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(32, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = SeparableConv2D(32, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     # module 3
+     residual = Conv2D(64, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(64, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = SeparableConv2D(64, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     # module 4
+     residual = Conv2D(128, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(128, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+     x = Activation('relu')(x)
+     x = SeparableConv2D(128, (3, 3), padding='same',
+                         kernel_regularizer=regularization,
+                         use_bias=False)(x)
+     x = BatchNormalization()(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     x = Conv2D(num_classes, (3, 3),
+                # kernel_regularizer=regularization,
+                padding='same')(x)
+     x = GlobalAveragePooling2D()(x)
+     output = Activation('softmax', name='predictions')(x)
+ 
+     model = Model(img_input, output)
+     return model
+ 
+ def big_XCEPTION(input_shape, num_classes):
+     img_input = Input(input_shape)
+     x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(img_input)
+     x = BatchNormalization(name='block1_conv1_bn')(x)
+     x = Activation('relu', name='block1_conv1_act')(x)
+     x = Conv2D(64, (3, 3), use_bias=False)(x)
+     x = BatchNormalization(name='block1_conv2_bn')(x)
+     x = Activation('relu', name='block1_conv2_act')(x)
+ 
+     residual = Conv2D(128, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
+     x = BatchNormalization(name='block2_sepconv1_bn')(x)
+     x = Activation('relu', name='block2_sepconv2_act')(x)
+     x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
+     x = BatchNormalization(name='block2_sepconv2_bn')(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+ 
+     residual = Conv2D(256, (1, 1), strides=(2, 2),
+                       padding='same', use_bias=False)(x)
+     residual = BatchNormalization()(residual)
+ 
+     x = Activation('relu', name='block3_sepconv1_act')(x)
+     x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
+     x = BatchNormalization(name='block3_sepconv1_bn')(x)
+     x = Activation('relu', name='block3_sepconv2_act')(x)
+     x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
+     x = BatchNormalization(name='block3_sepconv2_bn')(x)
+ 
+     x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
+     x = layers.add([x, residual])
+     x = Conv2D(num_classes, (3, 3),
+                # kernel_regularizer=regularization,
+                padding='same')(x)
+     x = GlobalAveragePooling2D()(x)
+     output = Activation('softmax', name='predictions')(x)
+ 
+     model = Model(img_input, output)
+     return model
+ 
+ 
+ if __name__ == "__main__":
+     input_shape = (64, 64, 1)
+     num_classes = 7
+     # model = tiny_XCEPTION(input_shape, num_classes)
+     # model.summary()
+     # model = mini_XCEPTION(input_shape, num_classes)
+     # model.summary()
+     # model = big_XCEPTION(input_shape, num_classes)
+     # model.summary()
+     model = simple_CNN((48, 48, 1), num_classes)
+     model.summary()
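Because every variant ends in GlobalAveragePooling2D followed by a softmax, the output is (None, num_classes) regardless of input resolution. A minimal shape check for the variant the demo uses, sketched by the editor:

from models.cnn import mini_XCEPTION

model = mini_XCEPTION((64, 64, 1), 7)
print(model.output_shape)  # (None, 7)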
real_time_video.py ADDED
@@ -0,0 +1,84 @@
+ from keras.preprocessing.image import img_to_array
+ import imutils
+ import cv2
+ from keras.models import load_model
+ import numpy as np
+ 
+ # parameters for loading data and images
+ detection_model_path = 'haarcascade_files/haarcascade_frontalface_default.xml'
+ emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5'
+ 
+ # loading the face detector and the emotion classifier
+ face_detection = cv2.CascadeClassifier(detection_model_path)
+ emotion_classifier = load_model(emotion_model_path, compile=False)
+ EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised",
+             "neutral"]
+ 
+ 
+ # feelings_faces = []
+ # for index, emotion in enumerate(EMOTIONS):
+ #     feelings_faces.append(cv2.imread('emojis/' + emotion + '.png', -1))
+ 
+ # starting video streaming
+ cv2.namedWindow('your_face')
+ camera = cv2.VideoCapture(0)
+ while True:
+     # reading the frame
+     frame = camera.read()[1]
+     frame = imutils.resize(frame, width=300)
+     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
+                                             minNeighbors=5, minSize=(30, 30),
+                                             flags=cv2.CASCADE_SCALE_IMAGE)
+ 
+     canvas = np.zeros((250, 300, 3), dtype="uint8")
+     frameClone = frame.copy()
+     if len(faces) > 0:
+         # keep the largest detection; entries are (x, y, w, h), so area is w * h
+         faces = sorted(faces, reverse=True, key=lambda x: x[2] * x[3])[0]
+         (fX, fY, fW, fH) = faces
+         # Extract the face ROI from the grayscale image, resize it to the
+         # 64x64 input the CNN expects, and prepare it for classification
+         roi = gray[fY:fY + fH, fX:fX + fW]
+         roi = cv2.resize(roi, (64, 64))
+         roi = roi.astype("float") / 255.0
+         roi = img_to_array(roi)
+         roi = np.expand_dims(roi, axis=0)
+ 
+         preds = emotion_classifier.predict(roi)[0]
+         emotion_probability = np.max(preds)
+         label = EMOTIONS[preds.argmax()]
+     else:
+         continue
+ 
+     for (i, (emotion, prob)) in enumerate(zip(EMOTIONS, preds)):
+         # construct the label text
+         text = "{}: {:.2f}%".format(emotion, prob * 100)
+ 
+         # draw the label + probability bar on the canvas
+         # emoji_face = feelings_faces[np.argmax(preds)]
+ 
+         w = int(prob * 300)
+         cv2.rectangle(canvas, (7, (i * 35) + 5),
+                       (w, (i * 35) + 35), (0, 0, 255), -1)
+         cv2.putText(canvas, text, (10, (i * 35) + 23),
+                     cv2.FONT_HERSHEY_SIMPLEX, 0.45,
+                     (255, 255, 255), 2)
+     cv2.putText(frameClone, label, (fX, fY - 10),
+                 cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
+     cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH),
+                   (0, 0, 255), 2)
+     # for c in range(0, 3):
+     #     frame[200:320, 10:130, c] = emoji_face[:, :, c] * \
+     #         (emoji_face[:, :, 3] / 255.0) + frame[200:320,
+     #         10:130, c] * (1.0 - emoji_face[:, :, 3] / 255.0)
+ 
+     cv2.imshow('your_face', frameClone)
+     cv2.imshow("Probabilities", canvas)
+     if cv2.waitKey(1) & 0xFF == ord('q'):
+         break
+ 
+ camera.release()
+ cv2.destroyAllWindows()
requirements.txt CHANGED
@@ -1 +1,7 @@
- nltk==3.5
+ opencv_python==4.1.1.26
+ tensorflow==1.14
+ Keras==2.3.1
+ pandas==0.25.3
+ numpy==1.17.4
+ imutils==0.5.3
+ scikit_learn==0.22.1
test.png ADDED
thumbnail.png ADDED
train_emotion_classifier.py ADDED
@@ -0,0 +1,64 @@
+ """
+ Description: Train emotion classification model
+ """
+ 
+ from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
+ from keras.callbacks import ReduceLROnPlateau
+ from keras.preprocessing.image import ImageDataGenerator
+ from load_and_process import load_fer2013
+ from load_and_process import preprocess_input
+ from models.cnn import mini_XCEPTION
+ from sklearn.model_selection import train_test_split
+ 
+ # parameters
+ batch_size = 32
+ num_epochs = 10000
+ input_shape = (48, 48, 1)
+ validation_split = .2
+ verbose = 1
+ num_classes = 7
+ patience = 50
+ base_path = 'models/'
+ 
+ # data generator
+ data_generator = ImageDataGenerator(
+     featurewise_center=False,
+     featurewise_std_normalization=False,
+     rotation_range=10,
+     width_shift_range=0.1,
+     height_shift_range=0.1,
+     zoom_range=.1,
+     horizontal_flip=True)
+ 
+ # model parameters/compilation
+ model = mini_XCEPTION(input_shape, num_classes)
+ model.compile(optimizer='adam', loss='categorical_crossentropy',
+               metrics=['accuracy'])
+ model.summary()
+ 
+ # callbacks
+ log_file_path = base_path + '_emotion_training.log'
+ csv_logger = CSVLogger(log_file_path, append=False)
+ early_stop = EarlyStopping('val_loss', patience=patience)
+ reduce_lr = ReduceLROnPlateau('val_loss', factor=0.1,
+                               patience=int(patience / 4), verbose=1)
+ trained_models_path = base_path + '_mini_XCEPTION'
+ model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
+ model_checkpoint = ModelCheckpoint(model_names, 'val_loss', verbose=1,
+                                    save_best_only=True)
+ callbacks = [model_checkpoint, csv_logger, early_stop, reduce_lr]
+ 
+ # loading dataset
+ faces, emotions = load_fer2013()
+ faces = preprocess_input(faces)
+ num_samples, num_classes = emotions.shape
+ xtrain, xtest, ytrain, ytest = train_test_split(faces, emotions,
+                                                 test_size=0.2, shuffle=True)
+ model.fit_generator(data_generator.flow(xtrain, ytrain, batch_size),
+                     steps_per_epoch=len(xtrain) / batch_size,
+                     epochs=num_epochs, verbose=1, callbacks=callbacks,
+                     validation_data=(xtest, ytest))
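After training, the checkpoint with the lowest val_loss can be reloaded and scored on a held-out split. A minimal editor's sketch; the checkpoint filename is hypothetical, standing in for whatever ModelCheckpoint wrote using the naming pattern above:

from keras.models import load_model
from load_and_process import load_fer2013, preprocess_input
from sklearn.model_selection import train_test_split

faces, emotions = load_fer2013()
faces = preprocess_input(faces)
_, xtest, _, ytest = train_test_split(faces, emotions, test_size=0.2, shuffle=True)

best = load_model('models/_mini_XCEPTION.99-0.65.hdf5', compile=False)  # hypothetical checkpoint name
best.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
loss, acc = best.evaluate(xtest, ytest, verbose=0)
print('held-out accuracy: {:.3f}'.format(acc))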