import gradio as gr
import cv2
import torch
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from transformers import AutoImageProcessor, SiglipForImageClassification

# ✅ Load model and processor (no manual files)
model_name = "prithivMLmods/deepfake-detector-model-v1"
processor = AutoImageProcessor.from_pretrained(model_name)
model = SiglipForImageClassification.from_pretrained(model_name)
model.eval()

# ✅ Face detector (OpenCV Haar cascade)
face_detector = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
)

# ✅ Deepfake detection function
def analyze(video_path):
    if video_path is None:
        return "❌ Please upload a video", None

    cap = cv2.VideoCapture(video_path)
    frame_preds = []
    frame_count = 0
    max_frames = 60  # analyze at most the first 60 frames

    while True:
        ret, frame = cap.read()
        if not ret or frame_count >= max_frames:
            break

        # Detect faces on the grayscale frame
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)

        found = False
        for (x, y, w, h) in faces:
            face = frame[y:y + h, x:x + w]
            if face.size == 0:
                continue

            # Classify the first detected face in this frame
            face_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            pil_image = Image.fromarray(face_rgb)
            inputs = processor(images=pil_image, return_tensors="pt")

            with torch.no_grad():
                logits = model(**inputs).logits

            # Class index 1 is treated as the "fake" class
            fake_prob = torch.softmax(logits, dim=-1)[0][1].item()
            frame_preds.append(fake_prob)
            found = True
            break

        if not found:
            frame_preds.append(0.5)  # neutral prediction for frames with no detected face

        frame_count += 1

    cap.release()

    if not frame_preds:
        return "❌ Could not read any frames. Try a different or better-quality video.", None

    avg = np.mean(frame_preds)
    verdict = "FAKE" if avg > 0.5 else "REAL"
    result = f"✅ FINAL RESULT: **{verdict}**\n🔢 Confidence: {avg:.2f}"

    # ✅ Plot per-frame fake-confidence histogram
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.hist(frame_preds, bins=10, color="red" if avg > 0.5 else "green", edgecolor="black")
    ax.set_title("Fake Confidence per Frame")
    ax.set_xlabel("Confidence (0=Real, 1=Fake)")
    ax.set_ylabel("Frame Count")
    ax.grid(True)

    return result, fig

# ✅ Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("## 🎭 Deepfake Detector (Colab Version Converted to Gradio)")
    gr.Markdown("Upload a short `.mp4` video and get a REAL or FAKE decision with a confidence histogram.")

    video = gr.Video(label="Upload your video")
    result = gr.Markdown()
    plot = gr.Plot()
    button = gr.Button("🔍 Analyze")

    button.click(fn=analyze, inputs=video, outputs=[result, plot])

demo.queue().launch()