hudaakram commited on
Commit
a055f15
·
verified ·
1 Parent(s): 699922a

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. README.md +12 -7
  2. app.py +92 -0
  3. requirements.txt +7 -0
README.md CHANGED
@@ -1,14 +1,19 @@
1
  ---
2
  title: Deepfake Detector
3
- emoji: 🦀
4
- colorFrom: indigo
5
- colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 5.45.0
8
  app_file: app.py
9
  pinned: false
10
- license: mit
11
- short_description: Deepfake detection with multiple ViT models
12
  ---
13
 
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
1
  ---
2
  title: Deepfake Detector
3
+ emoji: 🕵️‍♀️
4
+ colorFrom: purple
5
+ colorTo: blue
6
  sdk: gradio
 
7
  app_file: app.py
8
  pinned: false
 
 
9
  ---
10
 
11
+ # Deepfake Detector
12
+ - Multi-model **image-classification** for deepfake vs real.
13
+ - Pick a backbone or enable **Ensemble** to average scores.
14
+ - Models used (pretrained from the Hub):
15
+ - `prithivMLmods/Deep-Fake-Detector-v2-Model`
16
+ - `Wvolf/ViT_Deepfake_Detection`
17
+ - `yermandy/deepfake-detection`
18
+
19
+ > Note: Results are **heuristic**; accuracy depends on content quality and may vary across datasets. Video support (face cropping + temporal smoothing) is planned next.
app.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
import gradio as gr
from transformers import pipeline

# ---- Model registry (all are image-classification on HF) ----
# Maps human-readable dropdown names -> Hugging Face Hub model ids.
# Sources:
# - prithivMLmods/Deep-Fake-Detector-v2-Model (labels: Realism / Deepfake)
# - Wvolf/ViT_Deepfake_Detection (real/fake)
# - yermandy/deepfake-detection (CLIP-encoder baseline for deepfake)
MODEL_REGISTRY = {
    "ViT Deepfake v2 (Prithiv)": "prithivMLmods/Deep-Fake-Detector-v2-Model",
    "ViT Deepfake (Wvolf)": "Wvolf/ViT_Deepfake_Detection",
    "CLIP Deepfake (yermandy)": "yermandy/deepfake-detection",
}

# Lazily-populated cache: model id -> loaded transformers pipeline,
# so each backbone is downloaded/instantiated at most once per process.
_pipes = {}
17
+
18
def _get_pipe(model_id: str):
    """Return a cached image-classification pipeline for *model_id*.

    The pipeline is built on first request (device placement via
    ``device_map="auto"``) and memoized in the module-level ``_pipes`` dict.
    """
    pipe = _pipes.get(model_id)
    if pipe is None:
        pipe = pipeline("image-classification",
                        model=model_id,
                        device_map="auto")
        _pipes[model_id] = pipe
    return pipe
24
+
25
+ def _fake_real_probs(result):
26
+ # result: list[{'label': str, 'score': float}]
27
+ fake, real = 0.0, 0.0
28
+ for r in result:
29
+ lbl = r["label"].strip().lower()
30
+ s = float(r["score"])
31
+ if ("fake" in lbl) or ("deepfake" in lbl) or ("ai" in lbl):
32
+ fake = max(fake, s)
33
+ if ("real" in lbl) or ("realism" in lbl) or ("authentic" in lbl):
34
+ real = max(real, s)
35
+ if fake==0.0 and real==0.0:
36
+ # fallback: take top-1 and mirror
37
+ top = max(result, key=lambda x: x["score"])
38
+ is_fake = ("fake" in top["label"].lower()) or ("deepfake" in top["label"].lower()) or ("ai" in top["label"].lower())
39
+ if is_fake:
40
+ fake, real = float(top["score"]), 1.0 - float(top["score"])
41
+ else:
42
+ real, fake = float(top["score"]), 1.0 - float(top["score"])
43
+ # normalize to sum<=1 if both present
44
+ s = fake + real
45
+ if s > 1.0 and s > 0:
46
+ fake, real = fake/s, real/s
47
+ return fake, real
48
+
49
def predict(img, model_name, ensemble, top_k, threshold):
    """Classify *img* as FAKE or REAL with one backbone or the whole ensemble.

    Returns:
        verdict: "FAKE" if the mean fake probability reaches *threshold*, else "REAL".
        chart:   {"FAKE": p, "REAL": q} mapping for a gr.Label component.
        rows:    highest-scoring [model_id, label, score] rows across all models.
    """
    if ensemble:
        model_ids = list(MODEL_REGISTRY.values())
    else:
        model_ids = [MODEL_REGISTRY[model_name]]

    fake_scores, real_scores, rows = [], [], []
    for model_id in model_ids:
        outputs = _get_pipe(model_id)(img, top_k=top_k)
        f, r = _fake_real_probs(outputs)
        fake_scores.append(f)
        real_scores.append(r)
        rows.extend([model_id, o["label"], float(o["score"])] for o in outputs)

    n_models = len(model_ids)
    fake_prob = sum(fake_scores) / n_models
    real_prob = sum(real_scores) / n_models
    verdict = "FAKE" if fake_prob >= threshold else "REAL"

    # Aggregate chart (gr.Label expects a {label: score} mapping).
    chart = {"FAKE": float(fake_prob), "REAL": float(real_prob)}
    # Keep only the globally best-scoring rows for the table.
    rows.sort(key=lambda row: row[2], reverse=True)
    return verdict, chart, rows[: top_k * n_models]
70
+
71
# Gradio UI: image + settings on the left, verdict/probabilities/table on the right.
with gr.Blocks(theme="soft") as demo:
    gr.Markdown("# 🔎 Deepfake Detector\nChoose a model or use an **Ensemble** for a more robust score.")
    with gr.Row():
        with gr.Column(scale=3):
            # Input side.
            img = gr.Image(type="pil", label="Upload image (face works best)")
            with gr.Accordion("Settings", open=False):
                model_name = gr.Dropdown(list(MODEL_REGISTRY.keys()),
                                         value="ViT Deepfake v2 (Prithiv)",
                                         label="Backbone")
                ensemble = gr.Checkbox(label="Ensemble (use all models)", value=False)
                top_k = gr.Slider(1, 5, value=3, step=1, label="Top-k per model")
                threshold = gr.Slider(0.1, 0.9, value=0.5, step=0.01, label="FAKE threshold")
            # NOTE(review): source indentation is mangled by the diff scrape —
            # button placed at column level (below the accordion); confirm intended nesting.
            btn = gr.Button("Predict", variant="primary")
        with gr.Column(scale=2):
            # Output side: fed by predict() in this order.
            pred = gr.Label(label="Prediction (FAKE vs REAL)")
            chart = gr.Label(label="Aggregated probabilities")
            table = gr.Dataframe(headers=["model", "label", "score"], wrap=True)

    # Wire inputs/outputs to predict(img, model_name, ensemble, top_k, threshold).
    btn.click(predict, [img, model_name, ensemble, top_k, threshold], [pred, chart, table])

if __name__ == "__main__":
    demo.launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ gradio>=4.44.0
2
+ transformers>=4.43.0
3
+ accelerate>=0.33.0
4
+ safetensors
5
+ torch
6
+ Pillow
7
+ numpy