import gradio as gr
import tensorflow as tf
import numpy as np
from huggingface_hub import hf_hub_download

# Load a Keras model from the Hugging Face Hub
def load_model_from_hub(repo_id, filename):
    model_path = hf_hub_download(repo_id=repo_id, filename=filename)
    return tf.keras.models.load_model(model_path)

# Load both classifiers from the Hugging Face Hub
model1 = load_model_from_hub("arsath-sm/face_classification_model1", "face_classification_model1.h5")
model2 = load_model_from_hub("arsath-sm/face_classification_model2", "face_classification_model2.h5")

def preprocess_image(image):
    img = tf.image.resize(image, (224, 224))  # Resize to the models' expected input size
    img = tf.cast(img, tf.float32) / 255.0    # Normalize pixel values to [0, 1]
    return tf.expand_dims(img, 0)             # Add a batch dimension

def predict_image(image):
    preprocessed_image = preprocess_image(image)

    # Run both models on the preprocessed image
    pred1 = model1.predict(preprocessed_image)[0][0]
    pred2 = model2.predict(preprocessed_image)[0][0]

    # Interpret each sigmoid output as a label and a confidence score
    result1 = "Real" if pred1 > 0.5 else "Fake"
    confidence1 = pred1 if pred1 > 0.5 else 1 - pred1
    result2 = "Real" if pred2 > 0.5 else "Fake"
    confidence2 = pred2 if pred2 > 0.5 else 1 - pred2

    return (
        f"Model 1 (ResNet) Prediction: {result1} (Confidence: {confidence1:.2f})",
        f"Model 2 (Inception) Prediction: {result2} (Confidence: {confidence2:.2f})"
    )

# Create the Gradio interface
iface = gr.Interface(
    fn=predict_image,
    inputs=gr.Image(),
    outputs=[
        gr.Textbox(label="Model 1 (ResNet) Prediction"),
        gr.Textbox(label="Model 2 (Inception) Prediction")
    ],
    title="Real vs AI-Generated Face Classification",
    description="Upload an image to classify whether it's a real face or an AI-generated face using two different models: ResNet-style and Inception-style."
)

# Launch the app
iface.launch()
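
# Optional local smoke test (a minimal sketch, not part of the deployed app):
# feeds a random RGB array through predict_image to confirm both models respond.
# The 224x224x3 uint8 shape is an assumption; any HxWx3 image array would be
# resized the same way by preprocess_image. Uncomment to run locally.
# sample = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)
# print(predict_image(sample))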