Urvikava committed on
Commit
dbbbc4b
·
verified ·
1 Parent(s): 12058a1

Upload 11 files

Browse files
.dockerignore ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ __pycache__
2
+ *.pyc
3
+ venv*
4
+ .ipynb_checkpoints
Dockerfile ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Slim Python base keeps the final image small.
FROM python:3.10-slim

WORKDIR /app

# system deps — libgl1 is a runtime library commonly needed by imaging
# packages; the apt list cleanup keeps this layer small.
RUN apt-get update && apt-get install -y \
    libgl1 \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies first so this layer stays cached while the
# application code changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# Port that uvicorn binds below.
EXPOSE 7860

CMD ["uvicorn", "Fastapi.main:app", "--host", "0.0.0.0", "--port", "7860"]
Fastapi/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (150 Bytes). View file
 
Fastapi/__pycache__/main.cpython-312.pyc ADDED
Binary file (3.81 kB). View file
 
Fastapi/main.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, UploadFile, File, HTTPException, Form
import torch
import os
import uuid
import numpy as np
from PIL import Image
from fastapi.middleware.cors import CORSMiddleware


from inference.inference import (
    convert_to_ela_image,
    load_model
)

# FastAPI application exposing the deepfake-detection model over HTTP.
app = FastAPI(title="DeepFake Detection API")

# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# very permissive (and browsers reject the wildcard for credentialed
# requests); narrow this to known origins before production — confirm intent.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load model once at import time so every request reuses the same weights.
model, config, device = load_model()
28
@app.post("/predict")
async def predict(
    file: UploadFile = File(...),
    source: str = Form("upload")  # "camera" | "upload"
):
    """Classify an uploaded image as REAL or FAKE.

    Args:
        file: the image to analyse; must carry an ``image/*`` content type.
        source: ``"camera"`` selects raw-frame preprocessing (resize +
            divide-by-255); any other value runs the ELA transform.

    Returns:
        JSON with ``prediction`` (label from the model config's
        ``class_mapping``) and ``confidence`` (softmax probability as a
        percentage, rounded to 2 decimals).

    Raises:
        HTTPException: 400 for non-image, empty, or undecodable uploads.
    """
    if not file.content_type or not file.content_type.startswith("image/"):
        raise HTTPException(status_code=400, detail="Uploaded file is not an image")

    # Unique name so concurrent requests never clobber each other's file.
    temp_filename = f"temp_{uuid.uuid4().hex}.jpg"

    try:
        contents = await file.read()
        if not contents:
            raise HTTPException(status_code=400, detail="Empty image file")

        with open(temp_filename, "wb") as f:
            f.write(contents)

        # Validate the payload really decodes as an image. The context
        # manager closes the handle — the original leaked it, which blocks
        # the os.remove in the finally-block on Windows.
        try:
            with Image.open(temp_filename) as probe:
                probe.verify()
        except Exception:
            raise HTTPException(status_code=400, detail="Invalid image")

        # Conditional preprocessing: camera frames go in directly; uploads
        # get the ELA transform the model was trained on.
        if source == "camera":
            with Image.open(temp_filename) as raw:
                rgb = raw.convert("RGB").resize((128, 128))
            img = torch.tensor(np.array(rgb) / 255.0, dtype=torch.float32)
            img = img.permute(2, 0, 1).unsqueeze(0).to(device)
        else:
            img = convert_to_ela_image(temp_filename).to(device)

        with torch.no_grad():
            output = model(img)
            probs = torch.softmax(output, dim=1)
            pred = torch.argmax(probs, dim=1).item()
            confidence = probs[0, pred].item()

        return {
            "prediction": config["class_mapping"][str(pred)],
            "confidence": round(confidence * 100, 2)
        }

    finally:
        # Always clean up the temp file, even on validation errors.
        if os.path.exists(temp_filename):
            os.remove(temp_filename)
inference/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (152 Bytes). View file
 
inference/__pycache__/inference.cpython-312.pyc ADDED
Binary file (6.08 kB). View file
 
inference/inference.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import json
4
+ import numpy as np
5
+ import os
6
+ from PIL import Image, ImageChops, ImageEnhance
7
+
8
IMAGE_SIZE = (128, 128)

# ---------------- ELA ----------------
def convert_to_ela_image(image_path, quality=95):
    """Build an Error Level Analysis (ELA) tensor for *image_path*.

    The image is re-encoded as JPEG at *quality*, the per-pixel difference
    to the original is brightness-stretched, resized to IMAGE_SIZE, and
    returned as a float32 tensor of shape (1, 3, H, W) with values in [0, 1].
    """
    import io  # local import: keeps the module's top-level imports unchanged

    with Image.open(image_path) as src:
        original = src.convert("RGB")

    # Re-compress in memory instead of the original's shared on-disk
    # "temp_ela.jpg": a fixed filename is a race under concurrent server
    # requests and leaves a stray file behind.
    buffer = io.BytesIO()
    original.save(buffer, "JPEG", quality=quality)
    buffer.seek(0)

    with Image.open(buffer) as compressed:
        diff = ImageChops.difference(original, compressed)

    extrema = diff.getextrema()
    max_diff = max(ex[1] for ex in extrema)
    # Stretch the (usually tiny) differences toward the full 0-255 range.
    scale = 255.0 / max_diff if max_diff != 0 else 1

    diff = ImageEnhance.Brightness(diff).enhance(scale)
    diff = diff.resize(IMAGE_SIZE)

    ela = np.array(diff, dtype=np.float32) / 255.0
    return torch.tensor(ela).permute(2, 0, 1).unsqueeze(0)
31
+
32
+
33
+ # ---------------- MODEL ----------------
34
class DeepFakeCNN(nn.Module):
    """Small CNN binary classifier for 128x128 three-channel inputs.

    Three conv/ReLU/maxpool stages (3 -> 32 -> 64 -> 128 channels)
    followed by a two-layer classifier head emitting 2 logits.
    """

    def __init__(self):
        super().__init__()

        # Feature extractor; built as a list so the stage structure reads
        # top-to-bottom. Submodule names ("features", "classifier") match
        # the trained checkpoint's state_dict keys.
        conv_stages = [
            nn.Conv2d(3, 32, kernel_size=5),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(64, 128, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(2),
        ]
        self.features = nn.Sequential(*conv_stages)

        # Probe the conv stack once to size the first linear layer.
        self._to_linear = None
        self._get_flatten_size()

        self.classifier = nn.Sequential(
            nn.Linear(self._to_linear, 128),
            nn.ReLU(),
            nn.Dropout(0.4),
            nn.Linear(128, 2),
        )

    def _get_flatten_size(self):
        """Record how many features the conv stack emits for a 128x128 input."""
        with torch.no_grad():
            probe = self.features(torch.zeros(1, 3, 128, 128))
        self._to_linear = probe.view(1, -1).shape[1]

    def forward(self, x):
        """Return raw class logits of shape (batch, 2)."""
        feats = self.features(x)
        return self.classifier(feats.view(feats.size(0), -1))
72
+
73
+
74
+ # ---------------- LOAD MODEL ----------------
75
# Project root (one level above this inference/ package); the trained
# weights and config live under <root>/models/.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

def load_model(
    model_path=os.path.join(BASE_DIR, "models", "deepfake_cnn_2.pth"),
    config_path=os.path.join(BASE_DIR, "models", "config_cnn_2.json")
):
    """Load the trained DeepFakeCNN and its JSON config.

    Returns:
        (model, config, device): the model in eval mode on *device*, the
        parsed config dict (includes "class_mapping"), and the selected
        torch.device (CUDA when available, else CPU).
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    with open(config_path, "r") as f:
        config = json.load(f)

    model = DeepFakeCNN().to(device)
    # NOTE(review): torch.load without weights_only=True unpickles arbitrary
    # objects — acceptable for a trusted local checkpoint, but consider
    # weights_only=True (torch >= 1.13) — confirm the installed version.
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    return model, config, device
91
+
92
+ # Prediction Function
93
+
94
def predict_image(image_path, model, config, device):
    """Run the ELA pipeline on one image and classify it.

    Returns:
        (label, confidence): the label from config["class_mapping"] and
        its softmax probability as a float.
    """
    inputs = convert_to_ela_image(image_path).to(device)

    with torch.no_grad():
        scores = torch.softmax(model(inputs), dim=1)
        best = torch.argmax(scores, dim=1).item()
        certainty = scores[0, best].item()

    return config["class_mapping"][str(best)], certainty
106
+
models/config_cnn_2.json ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model_name": "DeepfakeCNN_ELA",
3
+ "framework": "pytorch",
4
+ "input_size": [
5
+ 128,
6
+ 128,
7
+ 3
8
+ ],
9
+ "ela_quality": 95,
10
+ "normalization": "divide_by_255",
11
+ "class_mapping": {
12
+ "0": "REAL",
13
+ "1": "FAKE"
14
+ },
15
+ "num_classes": 2,
16
+ "author": "Urvi",
17
+ "version": "1.0"
18
+ }
models/deepfake_cnn_2.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0609adf33b9b8eda8633e2658f271f8a116ca2f948352586589e3dd1788a135b
3
+ size 13230389
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ torch
4
+ torchvision
5
+ pillow
6
+ numpy
7
+ python-multipart