import streamlit as st
from PIL import Image
from deepface import DeepFace
import tempfile
import pandas as pd
import cv2 as cv
import threading
from time import sleep
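
# Streamlit app: upload two face images, check with DeepFace whether they show
# the same person, and optionally analyze age, gender, emotion and ethnicity.
# Run with: streamlit run app.py  (assuming this script is saved as app.py)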
st.title('Image Upload and Verification App')
st.write('Please upload two images for facial verification.')
# Upload two images
uploaded_file1 = st.file_uploader("Choose the first image...", type=["jpg", "png", "jpeg"], key="1")
uploaded_file2 = st.file_uploader("Choose the second image...", type=["jpg", "png", "jpeg"], key="2")
# Define the global variables
df = None
analyze_img1 = None
analyze_img2 = None
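
# verify(), analyze_image1() and analyze_image2() run in background threads in
# the main flow below, so they write their results into these module-level
# variables instead of returning them.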
def verify(img1_path, img2_path):
    global df
    model_name = 'VGG-Face'  # You can change this to other models like "Facenet", "OpenFace", "DeepFace", etc.
    result = DeepFace.verify(img1_path=img1_path, img2_path=img2_path, model_name=model_name)
    result["img1_facial_areas"] = result["facial_areas"]["img1"]
    result["img2_facial_areas"] = result["facial_areas"]["img2"]
    del result["facial_areas"]
    df = pd.DataFrame([result])
def analyze_image1(img1_path):
    global analyze_img1
    # DeepFace.analyze returns a list with one result per detected face; keep the first one
    analyze_img1 = DeepFace.analyze(img_path=img1_path)[0]
def analyze_image2(img2_path):
    global analyze_img2
    analyze_img2 = DeepFace.analyze(img_path=img2_path)[0]
def generate_analysis_sentence(analysis):
    age = analysis['age']
    # Pick the higher-scoring gender label rather than the last dictionary key
    gender = max(analysis['gender'], key=analysis['gender'].get)
    dominant_emotion = analysis['dominant_emotion']
    dominant_race = analysis['dominant_race']
    # Highlight specific words in blue
    age_html = f"<span style='color:blue'>{age}</span>"
    gender_html = f"<span style='color:blue'>{gender}</span>"
    dominant_emotion_html = f"<span style='color:blue'>{dominant_emotion}</span>"
    dominant_race_html = f"<span style='color:blue'>{dominant_race}</span>"
    return f"""The person in the image appears to be {age_html} years old, identified as '{gender_html}'.
The dominant emotion detected is {dominant_emotion_html}.
Ethnicity prediction indicates {dominant_race_html}."""
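
# The sentence above embeds inline HTML, so callers render it with
# st.markdown(..., unsafe_allow_html=True).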
def display_image_with_analysis(image, analysis):
    # Display the image
    st.image(image, caption='Image', use_column_width=True)
    # Display the analysis results
    st.write("Analysis:")
    st.markdown(generate_analysis_sentence(analysis), unsafe_allow_html=True)
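
# Note: display_image_with_analysis() is currently not called anywhere below;
# results are rendered by draw_rectangles() and get_analyze() instead.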
def draw_rectangles():
    # Uses the module-level img1_path, img2_path and df set in the main flow below
    # Load images with OpenCV
    img1 = cv.imread(img1_path)
    img2 = cv.imread(img2_path)
    # Get facial areas and draw rectangles
    face_area1 = df.iloc[0]["img1_facial_areas"]
    p1_1 = (face_area1["x"], face_area1["y"])
    p2_1 = (face_area1["x"] + face_area1["w"], face_area1["y"] + face_area1["h"])
    rect_img1 = cv.rectangle(img1.copy(), p1_1, p2_1, (0, 255, 0), 2)
    face_area2 = df.iloc[0]["img2_facial_areas"]
    p1_2 = (face_area2["x"], face_area2["y"])
    p2_2 = (face_area2["x"] + face_area2["w"], face_area2["y"] + face_area2["h"])
    rect_img2 = cv.rectangle(img2.copy(), p1_2, p2_2, (0, 255, 0), 2)
    # Convert BGR to RGB for display and resize with area interpolation
    rect_img1 = cv.cvtColor(rect_img1, cv.COLOR_BGR2RGB)
    rect_img1 = cv.resize(rect_img1, (200, 250), interpolation=cv.INTER_AREA)
    rect_img2 = cv.cvtColor(rect_img2, cv.COLOR_BGR2RGB)
    rect_img2 = cv.resize(rect_img2, (200, 250), interpolation=cv.INTER_AREA)
    # st.dataframe(df)
    # Display the results
    if df["verified"].iloc[0]:
        message = "The faces in the images match!"
    else:
        message = "The faces in the images do not match!"
    st.title(message)
    col1, col2 = st.columns(2)
    col1.image(rect_img1, caption='Verified Image 1', use_column_width=True)
    col2.image(rect_img2, caption='Verified Image 2', use_column_width=True)
def get_analyze():
    # Display the analysis results
    st.write("Analysis for Image 1:")
    try:
        st.markdown(generate_analysis_sentence(analyze_img1), unsafe_allow_html=True)
    except Exception:
        st.warning("Could not detect a face in image 1")
    st.write("Analysis for Image 2:")
    try:
        st.markdown(generate_analysis_sentence(analyze_img2), unsafe_allow_html=True)
    except Exception:
        st.warning("Could not detect a face in image 2")
col1, col2 = st.columns(2)
with col1:
    st.text("Check if the faces in the images match!")
    check = st.button("Check")
with col2:
    st.text("Analyze the faces in each image!")
    analyze = st.button("Analyze")
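
# Main flow (runs on every Streamlit rerun): once both images are uploaded they
# are saved to temporary files so DeepFace can read them from disk, then the
# verification and the two analyses run in parallel threads.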
if uploaded_file1 is not None and uploaded_file2 is not None:
    # Open the images with PIL
    image1 = Image.open(uploaded_file1)
    image2 = Image.open(uploaded_file2)
    st.write("Here are your images:")
    # Convert images to RGB if they are in RGBA mode
    if image1.mode == 'RGBA':
        image1 = image1.convert('RGB')
    if image2.mode == 'RGBA':
        image2 = image2.convert('RGB')
    # Save the uploaded images to temporary files
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp_file1:
        image1.save(tmp_file1.name)
        img1_path = tmp_file1.name
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as tmp_file2:
        image2.save(tmp_file2.name)
        img2_path = tmp_file2.name
    # Run verification and the per-image analyses in parallel background threads
    t1 = threading.Thread(target=verify, args=(img1_path, img2_path))
    t2 = threading.Thread(target=analyze_image1, args=(img1_path,))
    t3 = threading.Thread(target=analyze_image2, args=(img2_path,))
    t1.start()
    t2.start()
    t3.start()
    t1.join()
    if check and not t1.is_alive():
        n = 0
        while True:
            try:
                draw_rectangles()
                sleep(2)
                break
            except Exception:
                n = n + 1
                print(f"Try : {n}")
                if n == 4:
                    st.warning("Please make sure there are people's faces in each of the two photos or try again")
                    break
    t2.join()
    t3.join()
    if analyze:
        n = 0
        while t2.is_alive() or t3.is_alive():
            sleep(2)
        while True:
            try:
                get_analyze()
                sleep(2)
                break
            except Exception:
                n = n + 1
                print(f"Try : {n}")
                if n == 4:
                    st.warning("Please make sure there are people's faces in each of the two photos or try again")
                    break
else:
    st.write("Please upload both images to proceed.")