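"""Streamlit demo: take a webcam picture, analyze it with DeepFace
(age, gender, emotion, race), and draw the predictions on the image with OpenCV."""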
import time

import cv2
import numpy as np
import streamlit as st
from deepface import DeepFace

st.set_page_config(layout="wide")

# Haar cascade used to locate faces when drawing boxes and labels on the image.
cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

weights_paths = {
    'age': '/home/appuser/.deepface/weights/age_model_weights.h5',
    'gender': '/home/appuser/.deepface/weights/gender_model_weights.h5',
    'race': '/home/appuser/.deepface/weights/race_model_single_batch.h5',
    'emotion': '/home/appuser/.deepface/weights/facial_expression_model_weights.h5'
}
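# Note: `weights_paths` is not referenced elsewhere in this file. DeepFace
# normally downloads any missing model weights to ~/.deepface/weights on first
# use; these paths presumably document where the Space expects them to live.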
def upload():
    """Capture a photo with st.camera_input and decode it into a BGR OpenCV image."""
    image = None
    initial_image = st.camera_input('Take a picture')
    original_image = initial_image
    if initial_image is not None:
        bytes_data = initial_image.getvalue()
        image = cv2.imdecode(np.frombuffer(bytes_data, np.uint8), cv2.IMREAD_COLOR)
    return image, original_image
def main(options):
    col1, col2 = st.columns(2)
    image = None
    original_image = None
    with col1:
        if st.checkbox('Take a picture for prediction'):
            image, original_image = upload()
    with col2:
        if image is not None and st.button('Detect'):
            st.warning('This may take a few seconds...')
            progress_bar = st.progress(0.0)
            status_text = st.empty()
            # Run age / gender / emotion / race analysis with the chosen detector backend.
            result = DeepFace.analyze(image, detector_backend=options,
                                      actions=['age', 'gender', 'emotion', 'race'])
            for i in range(100):
                progress_bar.progress((i + 1) / 100)
                status_text.text(f"Processing {i+1}%")
                time.sleep(0.01)
            progress_bar.empty()
            status_text.empty()
            # Locate faces with the Haar cascade so the predictions can be drawn on the image.
            gray_frame = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            faces = cascade.detectMultiScale(gray_frame, 1.1, 3)
            user_selected_items = list(result[0].keys())
            for x, y, w, h in faces:
                cv2.rectangle(image, (x, y), (x + w, y + h), (4, 29, 255), 2, cv2.LINE_4)
                if 'age' in user_selected_items:
                    age_label = 'Age: ' + str(result[0]['age'])
                    cv2.putText(image, age_label, (x, y + h + 30), cv2.FONT_ITALIC, 1, (255, 255, 0), 2)
                if 'dominant_gender' in user_selected_items:
                    gender_label = 'Gender: ' + str(result[0]['dominant_gender'])
                    cv2.putText(image, gender_label, (x, y + h + 70), cv2.FONT_ITALIC, 1, (0, 255, 255), 2)
                if 'dominant_emotion' in user_selected_items:
                    emotion_label = 'Emotion: ' + str(result[0]['dominant_emotion']).title()
                    cv2.putText(image, emotion_label, (x, y + h + 110), cv2.FONT_ITALIC, 1, (255, 0, 255), 2)
                if 'dominant_race' in user_selected_items:
                    race_label = 'Race: ' + str(result[0]['dominant_race']).title()
                    cv2.putText(image, race_label, (x, y + h + 150), cv2.FONT_ITALIC, 1, (51, 102, 0), 2)
            st.image(image, channels='BGR')
def get_options():
    backends = ['opencv', 'mtcnn', 'retinaface']
    return st.selectbox('Choose a face detection backend:', backends)


if __name__ == '__main__':
    main(get_options())
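# Assumed local usage (not part of the original Space):
#   pip install streamlit deepface opencv-python-headless
#   streamlit run <this_file>.py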