import numpy as np
from deepface import DeepFace
import streamlit as st
import cv2
import time
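
# How to run this app locally (a sketch; assumes the script is saved as app.py):
#   pip install streamlit deepface opencv-python
#   streamlit run app.py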

st.set_page_config(layout="wide")

cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
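# Note: this Haar cascade (bundled with OpenCV) is only used to locate a face
# box for the overlay drawn in main(); DeepFace.analyze() runs its own face
# detector internally, selected via detector_backend.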

def upload():
    """Capture a photo from the webcam and decode it into an OpenCV BGR array."""
    image = None
    initial_image = st.camera_input('Take a picture')
    if initial_image is not None:
        bytes_data = initial_image.getvalue()
        # Decode the raw image bytes into a BGR array for OpenCV/DeepFace.
        image = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), cv2.IMREAD_COLOR)
    return image, initial_image


def main(detector_backend):
    if st.checkbox('Take a picture for prediction'):
        image, original_image = upload()
        # Run the analysis only once a picture has been captured and the button is pressed.
        if image is not None and original_image is not None and st.button('Prediction'):
            st.warning('Wait a few seconds!')
            progress_bar = st.progress(0.0)
            status_text = st.empty()

            result = DeepFace.analyze(
                image,
                detector_backend=detector_backend,
                actions=['age', 'gender', 'emotion', 'race'],
            )
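            # Illustrative shape of `result` (recent DeepFace versions return a
            # list with one dict per detected face; exact keys/values may vary):
            #   [{'age': 31, 'dominant_gender': 'Man', 'dominant_emotion': 'happy',
            #     'dominant_race': 'white', 'region': {...}, ...}]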
            
            # This progress bar is purely cosmetic: DeepFace.analyze() above is
            # synchronous, so the heavy work has already finished at this point.
            for i in range(100):
                progress_bar.progress((i + 1) / 100)
                status_text.text(f"Processing {i + 1}%")
                time.sleep(0.01)

            progress_bar.empty()
            status_text.empty()

            # Locate faces with the Haar cascade and sort them largest-first.
            gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = cascade.detectMultiScale(gray_frame, scaleFactor=1.1, minNeighbors=3)
            faces = sorted(faces, key=lambda f: -f[2] * f[3])

            if len(faces) > 0:
                x, y, w, h = faces[0]

                # Draw a bounding box around the largest detected face and
                # print each available attribute below it.
                cv2.rectangle(image, (x, y), (x + w, y + h), (4, 29, 255), 2, cv2.LINE_4)
                result_keys = result[0].keys()
                if 'age' in result_keys:
                    age_label = 'Age: ' + str(result[0]['age'])
                    cv2.putText(image, age_label, (x, y + h + 30), cv2.FONT_ITALIC, 1, (255, 255, 0), 2)
                if 'dominant_gender' in result_keys:
                    gender_label = 'Gender: ' + str(result[0]['dominant_gender'])
                    cv2.putText(image, gender_label, (x, y + h + 70), cv2.FONT_ITALIC, 1, (0, 255, 255), 2)
                if 'dominant_emotion' in result_keys:
                    emotion_label = 'Emotion: ' + str(result[0]['dominant_emotion']).title()
                    cv2.putText(image, emotion_label, (x, y + h + 110), cv2.FONT_ITALIC, 1, (255, 0, 255), 2)
                if 'dominant_race' in result_keys:
                    race_label = 'Race: ' + str(result[0]['dominant_race']).title()
                    cv2.putText(image, race_label, (x, y + h + 150), cv2.FONT_ITALIC, 1, (255, 255, 255), 2)

            st.image(image, channels='BGR')
            st.balloons()


def get_detector_backend():
    """Let the user pick one of the face-detector backends supported by DeepFace."""
    backends = ['opencv', 'mtcnn', 'retinaface']
    return st.selectbox('Choose a detector backend:', backends)


if __name__ == '__main__':
    main(get_detector_backend())