princeml committed
Commit 4683c60
1 Parent(s): 67093d3

Update app.py

Files changed (1)
  1. app.py +160 -136
app.py CHANGED
@@ -1,151 +1,175 @@
  # import numpy as np
  # import cv2
  # import streamlit as st
  # from tensorflow import keras
  # from keras.models import model_from_json
- # from tensorflow.keras.utils import img_to_array
  # from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, VideoProcessorBase, WebRtcMode
- import numpy as np
- import tensorflow as tf
- from PIL import Image
- import cv2
- import streamlit as st
- from tensorflow import keras
- from keras.models import model_from_json
- from tensorflow.keras.utils import img_to_array
- from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, VideoProcessorBase, WebRtcMode
-
-
-
-
-
- # load model
-
- emotion_dict = {0:'angry', 1 :'happy', 2: 'neutral', 3:'sad', 4: 'surprise'}
- # load json and create model
- json_file = open('emotion_model1.json', 'r')
- loaded_model_json = json_file.read()
- json_file.close()
- classifier = model_from_json(loaded_model_json)
- # load weights into new model
- classifier.load_weights("emotion_model1.h5")
-
- #load face
- try:
-     face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
- except Exception:
-     st.write("Error loading cascade classifiers")
-
- RTC_CONFIGURATION = RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})
-
- class Faceemotion(VideoTransformerBase):
-     def transform(self, frame):
-         img = frame.to_ndarray(format="bgr24")
-
-         #image gray
-         img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-         faces = face_cascade.detectMultiScale(
-             image=img_gray, scaleFactor=1.3, minNeighbors=5)
-         for (x, y, w, h) in faces:
-             cv2.rectangle(img=img, pt1=(x, y), pt2=(
-                 x + w, y + h), color=(255, 0, 0), thickness=2)
-             roi_gray = img_gray[y:y + h, x:x + w]
-             roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
-             if np.sum([roi_gray]) != 0:
-                 roi = roi_gray.astype('float') / 255.0
-                 roi = img_to_array(roi)
-                 roi = np.expand_dims(roi, axis=0)
-                 prediction = classifier.predict(roi)[0]
-                 maxindex = int(np.argmax(prediction))
-                 finalout = emotion_dict[maxindex]
-                 output = str(finalout)
-             label_position = (x, y)
-             cv2.putText(img, 'i', label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-
-         return img
-
-
- def generate_prediction(input_image):
-     # img = frame.to_ndarray(format="bgr24")
-
-     #image gray
-     img = input_image
-     img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-     faces = face_cascade.detectMultiScale(
-         image=img_gray, scaleFactor=1.3, minNeighbors=5)
-     for (x, y, w, h) in faces:
-         cv2.rectangle(img=img, pt1=(x, y), pt2=(
-             x + w, y + h), color=(255, 0, 0), thickness=2)
-         roi_gray = img_gray[y:y + h, x:x + w]
-         roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
-         if np.sum([roi_gray]) != 0:
-             roi = roi_gray.astype('float') / 255.0
-             roi = img_to_array(roi)
-             roi = np.expand_dims(roi, axis=0)
-             prediction = classifier.predict(roi)[0]
-             maxindex = int(np.argmax(prediction))
-             finalout = emotion_dict[maxindex]
-             output = str(finalout)
-         label_position = (x, y)
-         cv2.putText(img, output, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
-
-     return img
-
- def main():
-     # Face Analysis Application #
-     st.title(" Face Emotion Detection Application")
-     activiteis = ["Home", "Webcam Face Detection", "By Images","About"]
-     choice = st.sidebar.selectbox("Select Activity", activiteis)

-     if choice == "Home":
-         html_temp_home1 = """<div style="background-color:#6D7B8D;padding:10px">
-                              <h3 style="color:yellow;text-align:center;"> Welcome to world of AI with Prince </h3>
-                              <h4 style="color:white;text-align:center;">
-                              Face Emotion detection application using OpenCV, Custom CNN model and Streamlit.</h4>
-                              </div>
-                              </br>"""
-         st.markdown(html_temp_home1, unsafe_allow_html=True)
-         st.write("""
-                  Real time face emotion recognization just by one click.
-
-                  """)
-     elif choice == "Webcam Face Detection":
-         st.header("Webcam Live Feed")
-         st.write("Click on start to use webcam and detect your face emotion")
-         webrtc_streamer(key="example", mode=WebRtcMode.SENDRECV, rtc_configuration=RTC_CONFIGURATION,
-                         video_processor_factory=Faceemotion)
-         # st.video('https://www.youtube.com/watch?v=wyWmWaXapmI')

-     elif choice == "By Images":
-         st.header("Image Prediction App")
-         uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
-         if uploaded_file is not None:
-             image = np.array(Image.open(uploaded_file))

-             prediction = generate_prediction(image)
-             st.image(prediction, use_column_width=True)

-     elif choice == "About":
-         st.subheader("About this app")
-         html_temp_about1= """<div style="background-color:#6D7B8D;padding:10px">
-                              <h4 style="color:white;text-align:center;">
-                              Real time face emotion detection application using OpenCV, Custom Trained CNN model and Streamlit.</h4>
-                              </div>
-                              </br>"""
-         st.markdown(html_temp_about1, unsafe_allow_html=True)
-
-         html_temp4 = """
-                      <div style="background-color:#98AFC7;padding:10px">
-                      <h4 style="color:white;text-align:center;">Thanks for Visiting</h4>
-                      </div>
-                      <br></br>
-                      <br></br>"""

-         st.markdown(html_temp4, unsafe_allow_html=True)

-     else:
-         pass


- if __name__ == "__main__":
-     main()

+ # # import numpy as np
+ # # import cv2
+ # # import streamlit as st
+ # # from tensorflow import keras
+ # # from keras.models import model_from_json
+ # # from tensorflow.keras.utils import img_to_array
+ # # from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, VideoProcessorBase, WebRtcMode
  # import numpy as np
+ # import tensorflow as tf
+ # from PIL import Image
  # import cv2
  # import streamlit as st
  # from tensorflow import keras
  # from keras.models import model_from_json
+ # from tensorflow.keras.utils import img_to_array
  # from streamlit_webrtc import webrtc_streamer, VideoTransformerBase, RTCConfiguration, VideoProcessorBase, WebRtcMode
+
+
+
+
+
+ # # load model
+
+ # emotion_dict = {0:'angry', 1 :'happy', 2: 'neutral', 3:'sad', 4: 'surprise'}
+ # # load json and create model
+ # json_file = open('emotion_model1.json', 'r')
+ # loaded_model_json = json_file.read()
+ # json_file.close()
+ # classifier = model_from_json(loaded_model_json)
+ # # load weights into new model
+ # classifier.load_weights("emotion_model1.h5")
+
+ # #load face
+ # try:
+ #     face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+ # except Exception:
+ #     st.write("Error loading cascade classifiers")
+
+ # RTC_CONFIGURATION = RTCConfiguration({"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]})
+
+ # class Faceemotion(VideoTransformerBase):
+ #     def transform(self, frame):
+ #         img = frame.to_ndarray(format="bgr24")
+
+ #         #image gray
+ #         img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ #         faces = face_cascade.detectMultiScale(
+ #             image=img_gray, scaleFactor=1.3, minNeighbors=5)
+ #         for (x, y, w, h) in faces:
+ #             cv2.rectangle(img=img, pt1=(x, y), pt2=(
+ #                 x + w, y + h), color=(255, 0, 0), thickness=2)
+ #             roi_gray = img_gray[y:y + h, x:x + w]
+ #             roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
+ #             if np.sum([roi_gray]) != 0:
+ #                 roi = roi_gray.astype('float') / 255.0
+ #                 roi = img_to_array(roi)
+ #                 roi = np.expand_dims(roi, axis=0)
+ #                 prediction = classifier.predict(roi)[0]
+ #                 maxindex = int(np.argmax(prediction))
+ #                 finalout = emotion_dict[maxindex]
+ #                 output = str(finalout)
+ #             label_position = (x, y)
+ #             cv2.putText(img, 'i', label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+
+ #         return img
+
+
+ # def generate_prediction(input_image):
+ #     # img = frame.to_ndarray(format="bgr24")
+
+ #     #image gray
+ #     img = input_image
+ #     img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+ #     faces = face_cascade.detectMultiScale(
+ #         image=img_gray, scaleFactor=1.3, minNeighbors=5)
+ #     for (x, y, w, h) in faces:
+ #         cv2.rectangle(img=img, pt1=(x, y), pt2=(
+ #             x + w, y + h), color=(255, 0, 0), thickness=2)
+ #         roi_gray = img_gray[y:y + h, x:x + w]
+ #         roi_gray = cv2.resize(roi_gray, (48, 48), interpolation=cv2.INTER_AREA)
+ #         if np.sum([roi_gray]) != 0:
+ #             roi = roi_gray.astype('float') / 255.0
+ #             roi = img_to_array(roi)
+ #             roi = np.expand_dims(roi, axis=0)
+ #             prediction = classifier.predict(roi)[0]
+ #             maxindex = int(np.argmax(prediction))
+ #             finalout = emotion_dict[maxindex]
+ #             output = str(finalout)
+ #         label_position = (x, y)
+ #         cv2.putText(img, output, label_position, cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+
+ #     return img
+
+ # def main():
+ #     # Face Analysis Application #
+ #     st.title(" Face Emotion Detection Application")
+ #     activiteis = ["Home", "Webcam Face Detection", "By Images","About"]
+ #     choice = st.sidebar.selectbox("Select Activity", activiteis)

+ #     if choice == "Home":
+ #         html_temp_home1 = """<div style="background-color:#6D7B8D;padding:10px">
+ #                              <h3 style="color:yellow;text-align:center;"> Welcome to world of AI with Prince </h3>
+ #                              <h4 style="color:white;text-align:center;">
+ #                              Face Emotion detection application using OpenCV, Custom CNN model and Streamlit.</h4>
+ #                              </div>
+ #                              </br>"""
+ #         st.markdown(html_temp_home1, unsafe_allow_html=True)
+ #         st.write("""
+ #                  Real time face emotion recognization just by one click.
+
+ #                  """)
+ #     elif choice == "Webcam Face Detection":
+ #         st.header("Webcam Live Feed")
+ #         st.write("Click on start to use webcam and detect your face emotion")
+ #         webrtc_streamer(key="example", mode=WebRtcMode.SENDRECV, rtc_configuration=RTC_CONFIGURATION,
+ #                         video_processor_factory=Faceemotion)
+ #         # st.video('https://www.youtube.com/watch?v=wyWmWaXapmI')

+ #     elif choice == "By Images":
+ #         st.header("Image Prediction App")
+ #         uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+ #         if uploaded_file is not None:
+ #             image = np.array(Image.open(uploaded_file))

+ #             prediction = generate_prediction(image)
+ #             st.image(prediction, use_column_width=True)
+
+ #     elif choice == "About":
+ #         st.subheader("About this app")
+ #         html_temp_about1= """<div style="background-color:#6D7B8D;padding:10px">
+ #                              <h4 style="color:white;text-align:center;">
+ #                              Real time face emotion detection application using OpenCV, Custom Trained CNN model and Streamlit.</h4>
+ #                              </div>
+ #                              </br>"""
+ #         st.markdown(html_temp_about1, unsafe_allow_html=True)
+
+ #         html_temp4 = """
+ #                      <div style="background-color:#98AFC7;padding:10px">
+ #                      <h4 style="color:white;text-align:center;">Thanks for Visiting</h4>
+ #                      </div>
+ #                      <br></br>
+ #                      <br></br>"""

+ #         st.markdown(html_temp4, unsafe_allow_html=True)

+ #     else:
+ #         pass


+ # if __name__ == "__main__":
+ #     main()


+
+ # import tensorflow as tf
+ import cv2
+ import numpy as np
+ # from glob import glob
+ # from models import Yolov4
+ import gradio as gr
+ # model = Yolov4(weight_path="yolov4.weights", class_name_path='coco_classes.txt')
+ def gradio_wrapper(img):
+     global model
+     # print(np.shape(img))
+     # results = model.predict(img)
+     # return results[0]
+     return img  # placeholder: echo the frame so the live interface has output while the model call stays commented out
+ demo = gr.Interface(
+     gradio_wrapper,
+     # gr.Image(source="webcam", streaming=True, flip=True),
+     gr.Image(source="webcam", streaming=True),
+     "image",
+     live=True
+ )
+
+ demo.launch()
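For context, a minimal sketch of how the emotion classifier removed in this commit could be wired into the new Gradio shell. This is an assumption, not part of the commit: it presumes emotion_model1.json, emotion_model1.h5, and haarcascade_frontalface_default.xml still ship with the Space, and it replaces the removed img_to_array helper with np.expand_dims.

    # Hedged sketch: previous revision's classifier served through the new Gradio interface.
    import cv2
    import numpy as np
    import gradio as gr
    from keras.models import model_from_json

    emotion_dict = {0: 'angry', 1: 'happy', 2: 'neutral', 3: 'sad', 4: 'surprise'}

    # Load architecture and weights exactly as the removed Streamlit code did
    # (assumes the model files are still present in the repo).
    with open('emotion_model1.json', 'r') as json_file:
        classifier = model_from_json(json_file.read())
    classifier.load_weights('emotion_model1.h5')
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

    def gradio_wrapper(img):
        # Gradio supplies RGB frames, so convert from RGB rather than BGR.
        img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        faces = face_cascade.detectMultiScale(image=img_gray, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi = cv2.resize(img_gray[y:y + h, x:x + w], (48, 48), interpolation=cv2.INTER_AREA)
            if np.sum(roi) != 0:
                roi = roi.astype('float32') / 255.0
                roi = np.expand_dims(roi, axis=(0, -1))  # batch and channel dims: (1, 48, 48, 1)
                prediction = classifier.predict(roi)[0]
                label = emotion_dict[int(np.argmax(prediction))]
                cv2.putText(img, label, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        return img

    demo = gr.Interface(
        gradio_wrapper,
        gr.Image(source="webcam", streaming=True),
        "image",
        live=True,
    )
    demo.launch()

The commented-out Yolov4 lines suggest an alternative direction (object detection via model.predict(img) returning annotated frames); either model plugs into gradio_wrapper the same way.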