helinc committed
Commit 1c3ab0a · verified · 1 Parent(s): 161e04c

Update app/main.py

Files changed (1):
  1. app/main.py +67 -67
app/main.py CHANGED
@@ -1,58 +1,70 @@
+# app/main.py
 import os
-os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # silence TF/MediaPipe logs
-
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # quiet TF/MediaPipe logs
 from absl import logging as absl_logging
 absl_logging.set_verbosity(absl_logging.ERROR)
 
-from fastapi import FastAPI, Request, UploadFile, File, Form
-from fastapi.responses import HTMLResponse
-from fastapi.staticfiles import StaticFiles
-from fastapi.templating import Jinja2Templates
-
 import uuid
+from pathlib import Path
+
 import cv2
 import numpy as np
 import mediapipe as mp
-from pathlib import Path
+from fastapi import FastAPI, Request, UploadFile, File
+from fastapi.responses import HTMLResponse
+from fastapi.staticfiles import StaticFiles
+from fastapi.templating import Jinja2Templates
 
-# ------------ Paths ------------
+# ---------------- Paths & FastAPI ----------------
 BASE_DIR = Path(__file__).resolve().parent
 TEMPLATES_DIR = BASE_DIR / "templates"
 STATIC_DIR = BASE_DIR / "static"
-PROCESSED_DIR = STATIC_DIR / "processed"
-PROCESSED_DIR.mkdir(parents=True, exist_ok=True)
 
-# ------------ FastAPI -----------
+# Writable location on Hugging Face Spaces
+RESULTS_DIR = Path("/tmp/faceblur")
+RESULTS_DIR.mkdir(parents=True, exist_ok=True)
+
 app = FastAPI(title="Face Blur Web")
-app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
+app.mount("/static", StaticFiles(directory=str(STATIC_DIR)), name="static")
+app.mount("/results", StaticFiles(directory=str(RESULTS_DIR)), name="results")
 templates = Jinja2Templates(directory=str(TEMPLATES_DIR))
 
-# ------------ Detection/Blur settings ------------
-SCALES = [1.0, 1.5, 2.0, 2.5, 3.0]  # multi-scale improves small-face recall
+# --------------- Detection/Blur settings ----------------
+# Multi-scale + both MediaPipe models helps with small/far faces
+SCALES = [1.0, 1.5, 2.0, 2.5, 3.0]
 MIN_CONF = 0.25
 IOU_NMS = 0.35
 
-OVAL_SCALE_X = 1.35  # widen ellipse vs. face box
-OVAL_SCALE_Y = 1.55  # height scale
-FEATHER_FRAC = 0.12  # feather edge ~ % of face size
+# Oval size around each detected face (relative to bbox)
+OVAL_SCALE_X = 1.35
+OVAL_SCALE_Y = 1.55
 
-KERNEL_FRAC = 0.9  # blur kernel ~ % of face size (strong)
-MIN_KERNEL = 55    # lower bound kernel (odd)
-MAX_KERNEL = 301   # upper bound kernel (odd)
-MAX_SIDE = 1800    # optional resize for very large uploads (performance)
+# Edge feathering (as fraction of face size)
+FEATHER_FRAC = 0.12
 
-# ------------ Utils ------------
+# Blur strength scales with face size
+KERNEL_FRAC = 0.9
+MIN_KERNEL = 55
+MAX_KERNEL = 301
+
+# Downscale very large uploads for speed
+MAX_SIDE = 1800
+
+# ----------------- Utils -----------------
 def odd(n: int) -> int:
+    """Ensure odd kernel size >= 3."""
     n = max(3, int(n))
     return n if n % 2 == 1 else n + 1
 
 def nms(boxes, scores, iou_thresh=0.35):
+    """Non-max suppression for [x,y,w,h] boxes."""
     if not boxes:
         return []
     idxs = np.argsort(scores)[::-1]
     keep = []
     while len(idxs) > 0:
-        i = idxs[0]; keep.append(i)
+        i = idxs[0]
+        keep.append(i)
         xx1 = np.maximum(boxes[i][0], np.array([boxes[j][0] for j in idxs[1:]]))
         yy1 = np.maximum(boxes[i][1], np.array([boxes[j][1] for j in idxs[1:]]))
         xx2 = np.minimum(boxes[i][0]+boxes[i][2], np.array([boxes[j][0]+boxes[j][2] for j in idxs[1:]]))
@@ -65,8 +77,15 @@ def nms(boxes, scores, iou_thresh=0.35):
         idxs = idxs[1:][iou <= iou_thresh]
     return keep
 
+def gentle_contrast_boost(img_bgr: np.ndarray) -> np.ndarray:
+    """CLAHE on L channel of LAB for small contrast lift (helps detection)."""
+    lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
+    l, a, b = cv2.split(lab)
+    l = cv2.createCLAHE(2.0, (8, 8)).apply(l)
+    return cv2.cvtColor(cv2.merge([l, a, b]), cv2.COLOR_LAB2BGR)
+
 def detect_faces_mediapipe(img_bgr: np.ndarray):
-    """Detect faces across multiple scales and both MediaPipe models; return boxes [x,y,w,h]."""
+    """Detect faces across multiple scales and both MediaPipe models."""
    H, W = img_bgr.shape[:2]
    mp_fd = mp.solutions.face_detection
    detectors = [
@@ -85,7 +104,6 @@ def detect_faces_mediapipe(img_bgr: np.ndarray):
             bb = d.location_data.relative_bounding_box
             x, y = int(bb.xmin * Uw), int(bb.ymin * Uh)
             w, h = int(bb.width * Uw), int(bb.height * Uh)
-            # map back to original coordinates
             x, y, w, h = int(x/s), int(y/s), int(w/s), int(h/s)
             x = max(0, x); y = max(0, y)
             w = max(1, min(w, W - x)); h = max(1, min(h, H - y))
@@ -94,46 +112,39 @@
     keep = nms(boxes, scores, iou_thresh=IOU_NMS)
     return [boxes[i] for i in keep]
 
-def gentle_contrast_boost(img_bgr: np.ndarray) -> np.ndarray:
-    lab = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2LAB)
-    l, a, b = cv2.split(lab)
-    l = cv2.createCLAHE(2.0, (8, 8)).apply(l)
-    return cv2.cvtColor(cv2.merge([l, a, b]), cv2.COLOR_LAB2BGR)
-
 def draw_unblurred_preview(img_bgr: np.ndarray, boxes):
-    preview = img_bgr.copy()
+    """Draw green ovals (no blur)."""
+    out = img_bgr.copy()
     for (x, y, w, h) in boxes:
         center = (x + w // 2, y + h // 2)
         axes = (int((w/2) * OVAL_SCALE_X), int((h/2) * OVAL_SCALE_Y))
-        cv2.ellipse(preview, center, axes, 0, 0, 360, (0, 255, 0), 3)
-    return preview
+        cv2.ellipse(out, center, axes, 0, 0, 360, (0, 255, 0), 3)
+    return out
 
 def blur_faces_oval(img_bgr: np.ndarray, boxes):
+    """Strong oval blur with feathered edges; kernel scales per face size."""
     H, W = img_bgr.shape[:2]
     out = img_bgr.copy()
     for (x, y, w, h) in boxes:
-        # ellipse parameters
         cx, cy = x + w // 2, y + h // 2
         ax = int((w / 2) * OVAL_SCALE_X)
         ay = int((h / 2) * OVAL_SCALE_Y)
 
-        # rectangular ROI around ellipse
         x0 = max(0, cx - ax); y0 = max(0, cy - ay)
         x1 = min(W, cx + ax); y1 = min(H, cy + ay)
         roi = out[y0:y1, x0:x1]
         rh, rw = roi.shape[:2]
 
-        # kernel proportional to face size (clamped)
         k_base = int(KERNEL_FRAC * max(w, h))
+        # clamp to ROI and ensure odd
         k = odd(max(MIN_KERNEL, min(MAX_KERNEL, k_base, rh - (rh % 2 == 0), rw - (rw % 2 == 0))))
         if k < 9:
-            # fall back to pixelation if ROI is tiny
+            # fallback to pixelation if ROI too small
             small = cv2.resize(roi, (max(1, rw // 10), max(1, rh // 10)), interpolation=cv2.INTER_LINEAR)
             roi_blur = cv2.resize(small, (rw, rh), interpolation=cv2.INTER_NEAREST)
         else:
             roi_blur = cv2.GaussianBlur(roi, (k, k), 0)
 
-        # oval mask + feather
         mask = np.zeros((rh, rw), dtype=np.uint8)
         cv2.ellipse(mask, (rw // 2, rh // 2), (ax, ay), 0, 0, 360, 255, -1)
         feather = odd(int(max(w, h) * FEATHER_FRAC))
@@ -145,68 +156,57 @@ def blur_faces_oval(img_bgr: np.ndarray, boxes):
         out[y0:y1, x0:x1] = roi_out
     return out
 
-# ------------ Routes ------------
+# ----------------- Routes -----------------
 @app.get("/", response_class=HTMLResponse)
 async def index(request: Request):
-    return templates.TemplateResponse("index.html", {"request": request, "result": None})
+    return templates.TemplateResponse("index.html", {"request": request, "result": None, "error": None})
 
 @app.post("/upload", response_class=HTMLResponse)
-async def upload_image(
-    request: Request,
-    file: UploadFile = File(...),
-):
-    # Validate extension
-    name = file.filename or "upload"
+async def upload_image(request: Request, file: UploadFile = File(...)):
+    name = (file.filename or "upload").strip()
     if not name.lower().endswith((".jpg", ".jpeg", ".png", ".bmp", ".webp")):
         return templates.TemplateResponse("index.html", {
-            "request": request,
-            "error": "Unsupported file type. Please upload JPG/PNG/BMP/WEBP.",
-            "result": None
+            "request": request, "error": "Unsupported file type. Use JPG/PNG/BMP/WEBP.", "result": None
         })
 
-    # Read file into OpenCV image
     data = await file.read()
     npbuf = np.frombuffer(data, np.uint8)
     img = cv2.imdecode(npbuf, cv2.IMREAD_COLOR)
     if img is None:
         return templates.TemplateResponse("index.html", {
-            "request": request,
-            "error": "Could not decode image. Try another file.",
-            "result": None
+            "request": request, "error": "Could not decode image.", "result": None
        })
 
-    # Optional: downscale very large images for performance
+    # Optional downscale for very large images
     H, W = img.shape[:2]
     scale = min(1.0, float(MAX_SIDE) / max(H, W))
     if scale < 1.0:
         img = cv2.resize(img, (int(W * scale), int(H * scale)), interpolation=cv2.INTER_AREA)
 
-    # Small contrast boost helps detection
+    # Detection pipeline
     img_proc = gentle_contrast_boost(img)
-
-    # Detect faces
     boxes = detect_faces_mediapipe(img_proc)
     faces_count = len(boxes)
 
-    # Generate outputs
     preview = draw_unblurred_preview(img_proc, boxes)
     blurred = blur_faces_oval(img_proc, boxes)
 
-    # Save results with unique IDs
+    # Save to /tmp and serve via /results
     uid = uuid.uuid4().hex
-    annot_rel = f"static/processed/{uid}_annot.jpg"
-    blur_rel = f"static/processed/{uid}_blur.jpg"
+    annot_name = f"{uid}_annot.jpg"
+    blur_name = f"{uid}_blur.jpg"
 
-    cv2.imwrite(str(BASE_DIR / annot_rel), preview)
-    cv2.imwrite(str(BASE_DIR / blur_rel), blurred)
+    cv2.imwrite(str(RESULTS_DIR / annot_name), preview)
+    cv2.imwrite(str(RESULTS_DIR / blur_name), blurred)
 
     return templates.TemplateResponse("index.html", {
         "request": request,
         "error": None,
         "result": {
             "faces": faces_count,
-            "annot_url": f"/{annot_rel}",
-            "blur_url": f"/{blur_rel}",
+            "annot_url": f"/results/{annot_name}",
+            "blur_url": f"/results/{blur_name}",
             "filename": name
         }
     })
+
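Note on the main change: on Hugging Face Spaces the application directory is typically not writable at runtime, so this commit moves result images from static/processed to /tmp/faceblur and serves them through a second StaticFiles mount at /results. A minimal sketch (not part of the commit) for verifying that mount, assuming the app imports cleanly and FastAPI's TestClient (httpx) is installed; the probe file name is illustrative:

    from fastapi.testclient import TestClient
    from app.main import app, RESULTS_DIR

    client = TestClient(app)
    # Any bytes written under RESULTS_DIR should come back over the /results mount
    (RESULTS_DIR / "probe.jpg").write_bytes(b"not really a jpeg")
    resp = client.get("/results/probe.jpg")
    assert resp.status_code == 200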
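Since the helpers take plain BGR arrays, the detection/blur pipeline can also be exercised outside the web route. A hedged offline sketch (sample.jpg and the output name are illustrative, not from the repo):

    import cv2
    from app.main import gentle_contrast_boost, detect_faces_mediapipe, blur_faces_oval

    img = cv2.imread("sample.jpg")          # any local test image
    proc = gentle_contrast_boost(img)       # CLAHE lift before detection
    boxes = detect_faces_mediapipe(proc)    # multi-scale MediaPipe detection
    cv2.imwrite("sample_blurred.jpg", blur_faces_oval(proc, boxes))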