Upload 5 files
- AGNew.h5 +3 -0
- haarcascade_frontalface_default.xml +0 -0
- main.py +89 -0
- mymodel.h5 +3 -0
- requirements.txt +6 -0
AGNew.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fb9b96c55e446e4eb163d0b6823bbe71b425499dee55193bfda18d99d93f480
+size 291801472
haarcascade_frontalface_default.xml
ADDED
The diff for this file is too large to render.
main.py
ADDED
@@ -0,0 +1,89 @@
+import streamlit as st
+import cv2
+from PIL import Image
+import numpy as np
+from keras.models import load_model
+
+# Load models
+mymodel = load_model('./models/AGNew.h5')
+Emotion_model = load_model("./models/mymodel.h5")
+face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
+
+# Define emotion labels
+emotion_labels = ["Anger", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]
+
+# Define a function to process the uploaded image
+def process_image(img):
+    # Convert image to grayscale
+    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    # Detect faces in the image
+    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
+    predictions = []
+    for (x, y, w, h) in faces:
+        # Extract face region
+        face_img = img[y:y+h, x:x+w]
+        # Resize face image for age and gender model
+        face_resized = cv2.resize(face_img, (200, 200))
+        normalized_face = face_resized / 255.0
+        normalized_face = np.expand_dims(normalized_face, axis=0)
+        # Resize face image for emotion model
+        face_resized1 = cv2.resize(face_img, (48, 48))
+        normalized_face1 = face_resized1 / 255.0
+        normalized_face1 = np.expand_dims(normalized_face1, axis=0)
+        # Predict age and gender
+        pred_age = int(mymodel.predict(normalized_face)[0][0][0])
+        pred_gender = mymodel.predict(normalized_face)[1][0][0]
+        # Predict emotion
+        pred_emotion = Emotion_model.predict(normalized_face1)[0]
+        # Append predictions
+        predictions.append({
+            "x": x,
+            "y": y,
+            "w": w,
+            "h": h,
+            "age": pred_age,
+            "gender": pred_gender,
+            "emotion": emotion_labels[np.argmax(pred_emotion)]
+        })
+    return predictions
+
+# Streamlit app
+
+def main():
+
+    # st.set_page_config(page_title="Age, Emotion and Gender Recognition", page_icon=":)", layout="wide", initial_sidebar_state="expanded")
+
+    st.title("Age, Emotion and Gender Recognition")
+
+    uploaded_file = st.file_uploader("Upload Image", type=['png', 'jpg', 'jpeg'])
+
+    if uploaded_file is not None:
+        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
+        img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)
+        predictions = process_image(img)
+
+        # Draw bounding boxes and predictions on the image
+        for i, prediction in enumerate(predictions, start=1):
+            x, y, w, h = prediction['x'], prediction['y'], prediction['w'], prediction['h']
+            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 5)
+            age = int(prediction['age'])
+            gender = "Female" if prediction['gender'] > 0.5 else "Male"
+            label = f"{i}"
+            cv2.putText(img, label, (x, y + h + 35), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 10)
+
+        # Convert the image from BGR to RGB and wrap it in a PIL Image
+        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        img_pil = Image.fromarray(img)
+
+        # Display image with predictions
+        st.image(img_pil, caption="Uploaded Image", use_column_width=True)
+
+        # Display predictions
+        for i, prediction in enumerate(predictions, start=1):
+            st.subheader(f"Prediction {i}:")
+            st.write(f"Age: {prediction['age']}")
+            st.write(f"Gender: {'Female' if prediction['gender'] > 0.5 else 'Male'}")
+            st.write(f"Emotion: {prediction['emotion']}")
+
+if __name__ == "__main__":
+    main()
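
As a quick way to exercise process_image outside the Streamlit UI, here is a minimal smoke-test sketch; sample.jpg and the ./models/ layout referenced by main.py are assumptions, not part of this upload.

# Smoke test for process_image, run from the repo root (sketch, not part of the upload).
# Assumes a sample.jpg exists and the model paths in main.py resolve.
import cv2

from main import process_image  # importing main also loads both .h5 models

img = cv2.imread("sample.jpg")  # OpenCV returns BGR, which process_image expects
if img is None:
    raise SystemExit("sample.jpg not found")

for p in process_image(img):
    gender = "Female" if p["gender"] > 0.5 else "Male"
    print(f"age={p['age']}, gender={gender}, emotion={p['emotion']}")
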
mymodel.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cfd209147bc566b0cb737aa613c507aaf6e23db08c7426b70b085b991d48054
+size 343974136
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+tensorflow==2.14.0
+numpy>=1.18.5
+pandas>=1.0.0
+opencv-python
+flask==3.0.2
+uvicorn
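
Note on dependencies: main.py imports streamlit, Pillow (PIL), and keras, none of which are listed above, while flask, uvicorn, and pandas are listed but never imported; keras is pulled in by tensorflow==2.14.0, so in practice only streamlit and Pillow are missing. A quick environment check, as a sketch (the module list is inferred from main.py's imports, not from this upload):

# Sketch: confirm every module main.py imports can be resolved in the current environment.
import importlib

for module in ("streamlit", "cv2", "PIL", "numpy", "keras"):
    try:
        importlib.import_module(module)
        print(f"{module}: available")
    except ImportError as exc:
        print(f"{module}: missing ({exc})")
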