# Emotion_Detection_Using_Face_and_Speech / emotion_detection_using_face.py
from flask import Flask, request, redirect, url_for, render_template, send_file, Response
from werkzeug.utils import secure_filename
import os
import cv2
import numpy as np
from tensorflow.keras.models import model_from_json
import tensorflow as tf
# Constants
TARGET_SIZE = (48, 48)
BATCH_SIZE = 64
EMOTION_DICT = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
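# The model expects 48x48 grayscale face crops (TARGET_SIZE) and outputs one score per class above.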
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'
app.config['PROCESSED_FOLDER'] = 'processed'
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['PROCESSED_FOLDER'], exist_ok=True)
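# Raw uploads land in uploads/; annotated copies are written to processed/.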
def load_model():
    # Load the model architecture from JSON, then the trained weights.
    with open('model/emotion_model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    try:
        emotion_model = model_from_json(loaded_model_json)
    except TypeError:
        # Some Keras versions need Sequential passed explicitly when deserializing.
        emotion_model = model_from_json(loaded_model_json, custom_objects={'Sequential': tf.keras.models.Sequential})
    emotion_model.load_weights("model/emotion_model.h5")
    print("Loaded model from disk")
    return emotion_model
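# Load the model once at startup so every request reuses the same instance.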
emotion_model = load_model()
def process_image(file_path, output_path):
    img = cv2.imread(file_path)
    if img is None:
        print(f"Error loading image {file_path}")
        return
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    faces = face_detector.detectMultiScale(gray_img, scaleFactor=1.3, minNeighbors=5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y - 50), (x + w, y + h + 10), (0, 255, 0), 4)
        # Crop the face, resize to the model's input size, and add channel and batch dims.
        roi_gray_frame = gray_img[y:y + h, x:x + w]
        cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, TARGET_SIZE), -1), 0)
        emotion_prediction = emotion_model.predict(cropped_img)
        maxindex = int(np.argmax(emotion_prediction))
        cv2.putText(img, EMOTION_DICT[maxindex], (x + 5, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
    cv2.imwrite(output_path, img)
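# Example (hypothetical paths): process_image('uploads/sample.jpg', 'processed/sample.jpg')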
def process_video(file_path, output_path):
    cap = cv2.VideoCapture(file_path)
    face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    # XVID pairs most reliably with .avi containers; other extensions may need a different fourcc.
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    # Keep the source frame size, and reuse the source frame rate when it is reported.
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS) or 20.0
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (0, 255, 0), 4)
            roi_gray_frame = gray_frame[y:y + h, x:x + w]
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, TARGET_SIZE), -1), 0)
            emotion_prediction = emotion_model.predict(cropped_img)
            maxindex = int(np.argmax(emotion_prediction))
            cv2.putText(frame, EMOTION_DICT[maxindex], (x + 5, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
        out.write(frame)
    cap.release()
    out.release()
def gen_frames():
    # Stream annotated webcam frames as an MJPEG multipart response.
    cap = cv2.VideoCapture(0)
    face_detector = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_default.xml')
    while True:
        success, frame = cap.read()
        if not success:
            break
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y - 50), (x + w, y + h + 10), (0, 255, 0), 4)
            roi_gray_frame = gray_frame[y:y + h, x:x + w]
            cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray_frame, TARGET_SIZE), -1), 0)
            emotion_prediction = emotion_model.predict(cropped_img)
            maxindex = int(np.argmax(emotion_prediction))
            cv2.putText(frame, EMOTION_DICT[maxindex], (x + 5, y - 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)
        ret, buffer = cv2.imencode('.jpg', frame)
        frame = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    cap.release()
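# Each yielded chunk is one JPEG frame in the multipart stream served by /video_feed below.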
@app.route('/')
def index():
    return render_template('camera.html')
@app.route('/upload', methods=['POST'])
def upload_file():
    if 'file' not in request.files:
        return redirect(request.url)
    file = request.files['file']
    if file.filename == '':
        return redirect(request.url)
    if file:
        # Sanitize the client-supplied filename before writing it to disk.
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)
        output_path = os.path.join(app.config['PROCESSED_FOLDER'], filename)
        if filename.lower().endswith(('png', 'jpg', 'jpeg')):
            process_image(file_path, output_path)
        elif filename.lower().endswith(('mp4', 'avi', 'mov', 'mkv')):
            process_video(file_path, output_path)
        else:
            return 'Unsupported file type'
        return redirect(url_for('download_file', filename=filename))
    return 'File upload failed'
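# Example upload from the command line (a sketch; 'face.jpg' is a hypothetical local file):
#   curl -L -F "file=@face.jpg" -o annotated_face.jpg http://127.0.0.1:5000/upload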
@app.route('/download/<filename>')
def download_file(filename):
    # secure_filename guards against path traversal in the requested name.
    return send_file(os.path.join(app.config['PROCESSED_FOLDER'], secure_filename(filename)), as_attachment=True)
@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/camera')
def camera():
    return render_template('camera.html')
if __name__ == "__main__":
    app.run(debug=True)
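# Local run (a sketch; assumes model/ and haarcascades/ exist alongside this script):
#   python emotion_detection_using_face.py
# then open http://127.0.0.1:5000/ for the live webcam page.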