import gradio as gr
import tensorflow as tf
import numpy as np
from scipy.spatial.distance import cosine
import cv2

# Maximum cosine distance for a match; tune this for your embedding model.
RECOGNITION_THRESHOLD = 0.3

# Load the embedding model.
# NOTE: tf.keras.models.load_model expects a Keras-saved model (SavedModel directory,
# .h5, or .keras file); a .pth file is a PyTorch checkpoint and will not load here,
# so make sure this path points to a Keras-compatible export.
embedding_model = tf.keras.models.load_model('full_mode2.pth')

# In-memory database mapping user IDs to embeddings (lost when the app restarts).
user_embeddings = {}

# Preprocess the image for the embedding model.
def preprocess_image(image):
    image = cv2.resize(image, (375, 375))  # Must match the model's expected input size
    image = tf.keras.applications.resnet50.preprocess_input(image)
    return np.expand_dims(image, axis=0)  # Add batch dimension

# Generate an embedding for a single image.
def generate_embedding(image):
    preprocessed_image = preprocess_image(image)
    return embedding_model.predict(preprocessed_image)[0]

# Register a new user.
def register_user(image, user_id):
    if image is None or not user_id:
        return "Please provide both an image and a user ID."
    try:
        embedding = generate_embedding(image)
        user_embeddings[user_id] = embedding
        return f"User {user_id} registered successfully."
    except Exception as e:
        return f"Error during registration: {str(e)}"

# Recognize a user by finding the closest stored embedding.
def recognize_user(image):
    if image is None:
        return "Please provide an image."
    if not user_embeddings:
        return "No users registered yet."
    try:
        new_embedding = generate_embedding(image)
        min_distance = float('inf')
        recognized_user_id = "Unknown"
        for user_id, embedding in user_embeddings.items():
            distance = cosine(new_embedding, embedding)
            print(f"Distance for {user_id}: {distance}")  # Debug: print distances
            if distance < min_distance:
                min_distance = distance
                recognized_user_id = user_id
        print(f"Min distance: {min_distance}")  # Debug: print minimum distance
        if min_distance > RECOGNITION_THRESHOLD:
            return "User not recognized."
        return f"Recognized User: {recognized_user_id}"
    except Exception as e:
        return f"Error during recognition: {str(e)}"

def main():
    with gr.Blocks() as demo:
        gr.Markdown("Facial Recognition System")
        with gr.Tab("Register"):
            with gr.Row():
                img_register = gr.Image()
                user_id = gr.Textbox(label="User ID")
            register_button = gr.Button("Register")
            register_output = gr.Textbox()
            register_button.click(register_user, inputs=[img_register, user_id], outputs=register_output)
        with gr.Tab("Recognize"):
            with gr.Row():
                img_recognize = gr.Image()
            recognize_button = gr.Button("Recognize")
            recognize_output = gr.Textbox()
            recognize_button.click(recognize_user, inputs=[img_recognize], outputs=recognize_output)
    demo.launch(share=True)

if __name__ == "__main__":
    main()
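
# --- Optional sanity check (illustrative sketch, not part of the app flow) ---
# The hypothetical helper below smoke-tests the cosine-distance matching logic with
# random vectors instead of real face embeddings, so it can be run without the model
# file or Gradio. Call it manually (e.g. from a REPL) to see how RECOGNITION_THRESHOLD
# behaves; main() never invokes it, and it can be moved above main() if preferred.
def _smoke_test_matching(num_users=3, dim=128, seed=0):
    rng = np.random.default_rng(seed)
    # Fake "registered users" with random embedding vectors.
    fake_db = {f"user_{i}": rng.normal(size=dim) for i in range(num_users)}
    # Build a probe close to user_0: its own vector plus a little noise.
    probe = fake_db["user_0"] + 0.05 * rng.normal(size=dim)
    for uid, emb in fake_db.items():
        d = cosine(probe, emb)
        verdict = "match" if d <= RECOGNITION_THRESHOLD else "no match"
        print(f"{uid}: distance={d:.3f} -> {verdict}")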