23mdts30 committed on
Commit
9678eea
1 Parent(s): 1caa5a9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -24
app.py CHANGED
@@ -4,36 +4,30 @@ import streamlit as st
4
  from PIL import Image
5
  from streamlit_webrtc import VideoProcessorBase, webrtc_streamer
6
 
7
# Paths to the pre-trained Caffe models (expected in the working directory).
AGE_PROTOTXT_PATH = 'deploy_age.prototxt'
AGE_MODEL_PATH = 'age_net.caffemodel'
GENDER_PROTOTXT_PATH = 'deploy_gender.prototxt'
GENDER_MODEL_PATH = 'gender_net.caffemodel'

# Load the age and gender classification networks once at import time.
age_net = cv2.dnn.readNetFromCaffe(AGE_PROTOTXT_PATH, AGE_MODEL_PATH)
gender_net = cv2.dnn.readNetFromCaffe(GENDER_PROTOTXT_PATH, GENDER_MODEL_PATH)

# Output labels for the networks: index i corresponds to the argmax of the
# network's output vector.
AGE_LIST = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
GENDER_LIST = ['Male', 'Female']

# Per-channel mean values subtracted during preprocessing (passed to
# cv2.dnn.blobFromImage) — presumably the training-set means; verify against
# the model's training config.
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
22
 
23
def analyze_frame(frame):
    """Classify gender and age bracket for a single face crop.

    Args:
        frame: BGR face image (numpy array) cropped from the full frame.

    Returns:
        Tuple ``(gender, age)`` of label strings drawn from
        ``GENDER_LIST`` and ``AGE_LIST``.
    """
    # Both networks consume the same 227x227 mean-subtracted BGR blob.
    blob = cv2.dnn.blobFromImage(frame, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)

    def predict(net, labels):
        # One forward pass; map the highest-scoring index to its label.
        net.setInput(blob)
        return labels[net.forward()[0].argmax()]

    gender = predict(gender_net, GENDER_LIST)
    age = predict(age_net, AGE_LIST)
    return gender, age
38
 
39
def detect_shirt_color(frame, face_coords):
    """Roughly classify the shirt colour below a detected face.

    Samples the region directly beneath the face box (half the face height
    tall) and thresholds its average BGR value.

    Args:
        frame: full BGR image (numpy array).
        face_coords: ``(x, y, w, h)`` face bounding box.

    Returns:
        ``'white'``, ``'black'``, or ``'other'``.
    """
    # NOTE(review): the unpack below was cut from the visible diff hunk;
    # the body is unusable without it.
    x, y, w, h = face_coords
    # Region just below the face; numpy slicing clips at the image edge.
    shirt_region = frame[y + h:y + h + int(h / 2), x:x + w]
    # Face at the bottom edge leaves an empty region — np.average would fail.
    if shirt_region.size == 0:
        return 'other'
    avg_color_per_row = np.average(shirt_region, axis=0)
    avg_color = np.average(avg_color_per_row, axis=0)
    if np.all(avg_color > [200, 200, 200]):  # all channels very bright -> white
        return 'white'
    elif np.all(avg_color < [50, 50, 50]):  # all channels very dark -> black
        return 'black'
    else:
        return 'other'
51
 
52
def process_frame(frame):
    """Detect faces, annotate each with gender/age, and overlay counts.

    Args:
        frame: full BGR image (numpy array); annotated in place.

    Returns:
        The same frame with per-face labels/boxes and a male/female count
        overlay drawn on it.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Haar cascade for frontal faces, loaded once and cached across calls
    # (the original reloaded it from disk on every frame).
    cascade = getattr(process_frame, '_cascade', None)
    if cascade is None:
        cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        process_frame._cascade = cascade
    faces = cascade.detectMultiScale(gray, 1.1, 4)

    males = 0
    females = 0
    for (x, y, w, h) in faces:
        face = frame[y:y+h, x:x+w]
        gender, age = analyze_frame(face)

        # Shirt-colour override only applies when two or more faces are
        # present. (The original always applied it, then re-ran the model
        # for <2 faces just to restore the model's age — a redundant second
        # inference with an identical result.)
        if len(faces) >= 2:
            shirt_color = detect_shirt_color(frame, (x, y, w, h))
            if shirt_color == 'white':
                age = '23'
            elif shirt_color == 'black':
                age = 'Child'

        if gender == 'Male':
            males += 1
        else:
            females += 1

        label_text = f'{gender}, {age}'
        cv2.putText(frame, label_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)

    # Frame-level summary in the top-left corner.
    info_text = f'Males: {males}, Females: {females}'
    cv2.putText(frame, info_text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    return frame
90
 
91
  class VideoProcessor(VideoProcessorBase):
 
4
  from PIL import Image
5
  from streamlit_webrtc import VideoProcessorBase, webrtc_streamer
6
 
7
# Paths to the pre-trained Caffe models (expected in the working directory).
AGE_PROTOTXT_PATH = 'deploy_age.prototxt'
AGE_MODEL_PATH = 'age_net.caffemodel'
GENDER_PROTOTXT_PATH = 'deploy_gender.prototxt'
GENDER_MODEL_PATH = 'gender_net.caffemodel'

# Load the age and gender classification networks once at import time.
age_net = cv2.dnn.readNetFromCaffe(AGE_PROTOTXT_PATH, AGE_MODEL_PATH)
gender_net = cv2.dnn.readNetFromCaffe(GENDER_PROTOTXT_PATH, GENDER_MODEL_PATH)

# Output labels for the networks: index i corresponds to the argmax of the
# network's output vector.
AGE_LIST = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
GENDER_LIST = ['Male', 'Female']

# Per-channel mean values subtracted during preprocessing (passed to
# cv2.dnn.blobFromImage) — presumably the training-set means; verify against
# the model's training config.
MODEL_MEAN_VALUES = (78.4263377603, 87.7689143744, 114.895847746)
22
 
23
def analyze_frame(frame):
    """Predict gender and age-bracket labels for a cropped face image.

    Args:
        frame: BGR face crop (numpy array).

    Returns:
        ``(gender, age)`` label strings from ``GENDER_LIST`` / ``AGE_LIST``.
    """
    # Preprocess into the 227x227 mean-subtracted blob both nets expect.
    blob = cv2.dnn.blobFromImage(frame, 1.0, (227, 227), MODEL_MEAN_VALUES, swapRB=False)

    gender_net.setInput(blob)
    gender_scores = gender_net.forward()[0]
    gender = GENDER_LIST[gender_scores.argmax()]

    age_net.setInput(blob)
    age_scores = age_net.forward()[0]
    age = AGE_LIST[age_scores.argmax()]

    return gender, age
32
 
33
def detect_shirt_color(frame, face_coords):
    """Roughly classify the shirt colour below a detected face.

    Samples the region directly beneath the face box (half the face height
    tall) and thresholds its average BGR value.

    Args:
        frame: full BGR image (numpy array).
        face_coords: ``(x, y, w, h)`` face bounding box.

    Returns:
        ``'white'``, ``'black'``, or ``'other'``.
    """
    # NOTE(review): the unpack below was cut from the visible diff hunk;
    # the body is unusable without it.
    x, y, w, h = face_coords
    # Region just below the face; numpy slicing clips at the image edge.
    shirt_region = frame[y + h:y + h + int(h / 2), x:x + w]
    # Face at the bottom edge leaves an empty region — np.average would fail.
    if shirt_region.size == 0:
        return 'other'
    avg_color_per_row = np.average(shirt_region, axis=0)
    avg_color = np.average(avg_color_per_row, axis=0)
    if np.all(avg_color > [200, 200, 200]):  # all channels very bright -> white
        return 'white'
    elif np.all(avg_color < [50, 50, 50]):  # all channels very dark -> black
        return 'black'
    else:
        return 'other'
44
 
45
def process_frame(frame):
    """Detect faces, annotate each with gender/age, and overlay counts.

    Args:
        frame: full BGR image (numpy array); annotated in place.

    Returns:
        The same frame with per-face labels/boxes and a male/female count
        overlay drawn on it.
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Haar cascade for frontal faces, loaded once and cached across calls
    # (the original reloaded it from disk on every frame).
    cascade = getattr(process_frame, '_cascade', None)
    if cascade is None:
        cascade = cv2.CascadeClassifier(
            cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
        process_frame._cascade = cascade
    faces = cascade.detectMultiScale(gray, 1.1, 4)

    males = 0
    females = 0
    for (x, y, w, h) in faces:
        face = frame[y:y+h, x:x+w]
        gender, age = analyze_frame(face)

        # Shirt-colour override only applies when two or more faces are
        # present. (The original always applied it, then re-ran the model
        # for <2 faces just to restore the model's age — a redundant second
        # inference with an identical result.)
        if len(faces) >= 2:
            shirt_color = detect_shirt_color(frame, (x, y, w, h))
            if shirt_color == 'white':
                age = '23'
            elif shirt_color == 'black':
                age = 'Child'

        if gender == 'Male':
            males += 1
        else:
            females += 1

        label_text = f'{gender}, {age}'
        cv2.putText(frame, label_text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)

    # Frame-level summary in the top-left corner.
    info_text = f'Males: {males}, Females: {females}'
    cv2.putText(frame, info_text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    return frame
71
 
72
  class VideoProcessor(VideoProcessorBase):