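"""Gradio app for deepfake detection on images and videos.

Single images are classified directly with the Smogy/SMOGY-Ai-images-detector
model from the Hugging Face Hub; for videos, faces are first located with an
OpenCV Haar cascade, then each face crop is classified and annotated in place.
"""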
import tempfile

import cv2
import gradio as gr
import torch
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification
# Load the deepfake detection model
processor = AutoImageProcessor.from_pretrained("Smogy/SMOGY-Ai-images-detector")
model = AutoModelForImageClassification.from_pretrained("Smogy/SMOGY-Ai-images-detector")
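# Note: inference runs on the CPU by default. If a GPU is available, moving the
# model and the processor outputs onto it (model.to("cuda"), inputs.to("cuda"))
# would speed up the per-frame classification in process_video() considerably.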
# Load face detection cascade
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
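# cv2.data.haarcascades points at the cascade XML files bundled with the
# opencv-python wheel, so no extra download is needed.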
def detect_deepfake_image(image: Image.Image) -> str:
    """Detect deepfake in a single image."""
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    probs = torch.softmax(outputs.logits, dim=1)
    idx = probs.argmax().item()
    label = model.config.id2label[idx]
    conf = probs[0, idx].item()
    return f"The image is {label} with confidence {conf:.2f}"
def process_video(video_path: str) -> str:
    """Process video frame by frame and add detection annotations."""
    # Open input video
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError("Could not open video file")

    # Get video properties; some containers report 0 FPS, so fall back to a
    # default (30 is an assumption) to keep VideoWriter from failing
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Create temporary output file
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as temp_file:
        output_path = temp_file.name

    # Initialize video writer
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
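    # Note: 'mp4v'-encoded files do not always play inside the browser's
    # <video> element; if playback fails in the Gradio UI, re-encoding the
    # output to H.264 (e.g. with ffmpeg) is a common workaround.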
    # Process each frame
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        # Convert to grayscale for face detection
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)
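        # Haar cascades only find near-frontal faces; profile or heavily
        # occluded faces pass through the loop without annotation.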
        # Process each detected face
        for (x, y, w, h) in faces:
            # Extract face ROI
            face_img = frame[y:y+h, x:x+w]

            # Convert to PIL Image and process
            face_pil = Image.fromarray(cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB))
            inputs = processor(images=face_pil, return_tensors="pt")
            with torch.no_grad():
                outputs = model(**inputs)
            probs = torch.softmax(outputs.logits, dim=1)
            idx = probs.argmax().item()
            label = model.config.id2label[idx]
            conf = probs[0, idx].item()
            # Draw bounding box and label; label names come from
            # model.config.id2label, so compare case-insensitively
            color = (0, 255, 0) if label.lower() == 'real' else (0, 0, 255)  # BGR format
            cv2.rectangle(frame, (x, y), (x+w, y+h), color, 2)
            cv2.putText(frame, f"{label} {conf:.2f}",
                        (x, max(y - 10, 20)), cv2.FONT_HERSHEY_SIMPLEX,
                        0.7, color, 2, cv2.LINE_AA)
        # Write processed frame
        out.write(frame)

    # Release resources
    cap.release()
    out.release()
    return output_path
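# NamedTemporaryFile is created with delete=False above, so annotated videos
# accumulate on disk until the process exits; on Hugging Face Spaces the
# container filesystem is ephemeral, but a long-running local server may want
# to clean these files up.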
with gr.Blocks() as demo:
    gr.Markdown("# Deepfake Detection Suite")

    with gr.Tab("Image Detection"):
        gr.Markdown("## Image Deepfake Detection")
        img_input = gr.Image(type="pil", label="Input Image")
        img_output = gr.Textbox(label="Detection Result")
        img_button = gr.Button("Analyze Image")

    with gr.Tab("Video Detection"):
        gr.Markdown("## Video Deepfake Detection")
        vid_input = gr.Video(label="Input Video")
        vid_output = gr.Video(label="Processed Video")
        vid_button = gr.Button("Analyze Video")

    img_button.click(fn=detect_deepfake_image, inputs=img_input, outputs=img_output)
    vid_button.click(fn=process_video, inputs=vid_input, outputs=vid_output)
if __name__ == "__main__":
    demo.launch()
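# When deployed as a Hugging Face Space this file runs as-is; when running
# locally, demo.launch(share=True) would additionally expose a temporary
# public URL.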