alfred8995 committed on
Commit 6b2e155 · verified · 1 Parent(s): 1337823

Upload folder using huggingface_hub

Files changed (4):
  1. README.md +23 -0
  2. chute_config.yml +24 -0
  3. miner.py +627 -0
  4. weights.onnx +3 -0
README.md ADDED
@@ -0,0 +1,23 @@
+ ---
+ tags:
+ - element_type:detect
+ - model:yolov11-nano
+ - object:person
+ manako:
+   description: Roboflow - generated by element_trainer service to detect person
+   source: element_trainer/800e961b-eb64-4380-880c-f1ed67abd563
+   prompt_hints: null
+   input_payload:
+   - name: frame
+     type: image
+     description: RGB frame
+   output_payload:
+   - name: detections
+     type: detections
+     description: List of detections
+   evaluation_score: null
+   last_benchmark:
+     type: synthetic_fixed
+     ran_at: '2026-03-06T02:20:51.927289Z'
+     result_path: benchmark/synthetic/1ada5b1e-38b8-4bdc-967a-d8a27b0e6afb.json
+ ---
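
The block between the `---` markers is ordinary YAML front matter, so the `manako` payload schema can be read back without any model-specific tooling. A minimal sketch, assuming only PyYAML (already pinned in chute_config.yml) and the field names shown above; the helper itself is illustrative, not part of the repo:

```python
# Minimal front-matter reader for the README above (illustrative helper).
# Assumes the file opens with a '---' ... '---' YAML block, as shown.
from pathlib import Path

import yaml  # PyYAML, pinned as 'pyyaml>=6.0' in chute_config.yml


def read_front_matter(readme_path: Path) -> dict:
    text = readme_path.read_text(encoding="utf-8")
    # Everything between the first two '---' delimiters is the YAML block.
    _, block, _ = text.split("---", 2)
    return yaml.safe_load(block)


meta = read_front_matter(Path("README.md"))
print(meta["tags"])                     # ['element_type:detect', ...]
print(meta["manako"]["input_payload"])  # [{'name': 'frame', 'type': 'image', ...}]
```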
chute_config.yml ADDED
@@ -0,0 +1,24 @@
+ Image:
+   from_base: parachutes/python:3.12
+   run_command:
+   - pip install --upgrade setuptools wheel
+   - pip install 'numpy>=1.23' 'onnxruntime-gpu[cuda,cudnn]>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9'
+   - pip install torch torchvision
+
+ NodeSelector:
+   gpu_count: 1
+   min_vram_gb_per_gpu: 16
+   max_hourly_price_per_gpu: 0.5
+
+   exclude:
+   - "5090"
+   - b200
+   - h200
+   - mi300x
+
+ Chute:
+   timeout_seconds: 900
+   concurrency: 4
+   max_instances: 5
+   scaling_threshold: 0.5
+   shutdown_after_seconds: 288000
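
The config maps cleanly onto pydantic models, which the image already installs. The classes below are an illustrative validation sketch mirroring the keys above; they are assumptions for local checking, not the chutes platform's own schema (the nesting of `exclude` under `NodeSelector` follows the layout shown):

```python
# Illustrative pydantic mirror of chute_config.yml. Field names come from
# the YAML above; the schema itself is an assumption, not the platform's.
import yaml
from pydantic import BaseModel, Field


class ImageSpec(BaseModel):
    from_base: str
    run_command: list[str]


class NodeSelectorSpec(BaseModel):
    gpu_count: int
    min_vram_gb_per_gpu: int
    max_hourly_price_per_gpu: float
    exclude: list[str] = Field(default_factory=list)


class ChuteSpec(BaseModel):
    timeout_seconds: int
    concurrency: int
    max_instances: int
    scaling_threshold: float
    shutdown_after_seconds: int


class ChuteConfig(BaseModel):
    Image: ImageSpec
    NodeSelector: NodeSelectorSpec
    Chute: ChuteSpec


with open("chute_config.yml") as f:
    cfg = ChuteConfig.model_validate(yaml.safe_load(f))
print(cfg.NodeSelector.exclude)  # ['5090', 'b200', 'h200', 'mi300x']
```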
miner.py ADDED
@@ -0,0 +1,627 @@
+ from pathlib import Path
+ import math
+
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ from numpy import ndarray
+ from pydantic import BaseModel
+
+
+ class BoundingBox(BaseModel):
+     x1: int
+     y1: int
+     x2: int
+     y2: int
+     cls_id: int
+     conf: float
+
+
+ class TVFrameResult(BaseModel):
+     frame_id: int
+     boxes: list[BoundingBox]
+     keypoints: list[tuple[int, int]]
+
+
+ class Miner:
+     def __init__(self, path_hf_repo: Path) -> None:
+         model_path = path_hf_repo / "weights.onnx"
+         self.class_names = ["person"]
+         print("ORT version:", ort.__version__)
+
+         try:
+             ort.preload_dlls()
+             print("✅ onnxruntime.preload_dlls() success")
+         except Exception as e:
+             print(f"⚠️ preload_dlls failed: {e}")
+
+         print("ORT available providers BEFORE session:", ort.get_available_providers())
+
+         sess_options = ort.SessionOptions()
+         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+         try:
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+             )
+             print("✅ Created ORT session with preferred CUDA provider list")
+         except Exception as e:
+             print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CPUExecutionProvider"],
+             )
+
+         print("ORT session providers:", self.session.get_providers())
+
+         for inp in self.session.get_inputs():
+             print("INPUT:", inp.name, inp.shape, inp.type)
+
+         for out in self.session.get_outputs():
+             print("OUTPUT:", out.name, out.shape, out.type)
+
+         self.input_name = self.session.get_inputs()[0].name
+         self.output_names = [output.name for output in self.session.get_outputs()]
+         self.input_shape = self.session.get_inputs()[0].shape
+
+         self.input_height = self._safe_dim(self.input_shape[2], default=1280)
+         self.input_width = self._safe_dim(self.input_shape[3], default=1280)
+
+         # --- Scoring-aware adaptive confidence ---
+         # total_score = mAP50 * 0.65 + FP_score * 0.35
+         # FP_score = max(0, 1 - n_FP / n_images), typically n_images ≈ 10
+         #
+         # mAP50 weight is higher for person detection → favor recall slightly more.
+         # Crossover at ~1.9 GT/image: below → recall wins, above → precision wins.
+         self.conf_thres = 0.1198  # Base threshold for candidate generation (wide net)
+         self.iou_thres = 0.4229  # NMS threshold
+         self.max_det = 150
+
+         # TTA consensus thresholds
+         self.conf_high = 0.7134  # Boxes above this survive without TTA confirmation
+         self.tta_match_iou = 0.4793  # TTA cross-view match IoU
+
+         # Adaptive conf curve: lerp between low/high based on raw detection count
+         self.conf_adapt_low = 0.3361  # Few objects: favor recall, each TP ≈ 0.065+ of total
+         self.conf_adapt_high = 0.6591  # Many objects: favor precision, FP costs 0.035 each
+         self.count_low = 20  # Raw count below this → use conf_adapt_low
+         self.count_high = 23  # Raw count above this → use conf_adapt_high
+
+         self.use_tta = True
+
+         # Box sanity filters
+         self.min_box_area = 14 * 14
+         self.min_w = 8
+         self.min_h = 8
+         self.max_aspect_ratio = 6.5
+         self.max_box_area_ratio = 0.8
+
+         print(f"✅ ONNX model loaded from: {model_path}")
+         print(f"✅ ONNX providers: {self.session.get_providers()}")
+         print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")
+
+     def __repr__(self) -> str:
+         return (
+             f"ONNXRuntime(session={type(self.session).__name__}, "
+             f"providers={self.session.get_providers()})"
+         )
+
+     @staticmethod
+     def _safe_dim(value, default: int) -> int:
+         return value if isinstance(value, int) and value > 0 else default
+
+     def _letterbox(
+         self,
+         image: ndarray,
+         new_shape: tuple[int, int],
+         color=(114, 114, 114),
+     ) -> tuple[ndarray, float, tuple[float, float]]:
+         h, w = image.shape[:2]
+         new_w, new_h = new_shape
+
+         ratio = min(new_w / w, new_h / h)
+         resized_w = int(round(w * ratio))
+         resized_h = int(round(h * ratio))
+
+         if (resized_w, resized_h) != (w, h):
+             interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
+             image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)
+
+         dw = new_w - resized_w
+         dh = new_h - resized_h
+         dw /= 2.0
+         dh /= 2.0
+
+         left = int(round(dw - 0.1))
+         right = int(round(dw + 0.1))
+         top = int(round(dh - 0.1))
+         bottom = int(round(dh + 0.1))
+
+         padded = cv2.copyMakeBorder(
+             image,
+             top,
+             bottom,
+             left,
+             right,
+             borderType=cv2.BORDER_CONSTANT,
+             value=color,
+         )
+         return padded, ratio, (dw, dh)
+
+     def _preprocess(
+         self, image: ndarray
+     ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
+         orig_h, orig_w = image.shape[:2]
+
+         img, ratio, pad = self._letterbox(
+             image, (self.input_width, self.input_height)
+         )
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         img = img.astype(np.float32) / 255.0
+         img = np.transpose(img, (2, 0, 1))[None, ...]
+         img = np.ascontiguousarray(img, dtype=np.float32)
+
+         return img, ratio, pad, (orig_w, orig_h)
+
+     @staticmethod
+     def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
+         w, h = image_size
+         boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
+         boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
+         boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
+         boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
+         return boxes
+
+     @staticmethod
+     def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
+         out = np.empty_like(boxes)
+         out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
+         out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
+         out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
+         out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
+         return out
+
+     @staticmethod
+     def _hard_nms(
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         iou_thresh: float,
+     ) -> np.ndarray:
+         if len(boxes) == 0:
+             return np.array([], dtype=np.intp)
+
+         boxes = np.asarray(boxes, dtype=np.float32)
+         scores = np.asarray(scores, dtype=np.float32)
+         order = np.argsort(scores)[::-1]
+         keep = []
+
+         while len(order) > 0:
+             i = order[0]
+             keep.append(i)
+             if len(order) == 1:
+                 break
+
+             rest = order[1:]
+
+             xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
+             yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
+             xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
+             yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
+
+             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+
+             area_i = np.maximum(0.0, boxes[i, 2] - boxes[i, 0]) * np.maximum(0.0, boxes[i, 3] - boxes[i, 1])
+             area_r = np.maximum(0.0, boxes[rest, 2] - boxes[rest, 0]) * np.maximum(0.0, boxes[rest, 3] - boxes[rest, 1])
+
+             iou = inter / (area_i + area_r - inter + 1e-7)
+             order = rest[iou <= iou_thresh]
+
+         return np.array(keep, dtype=np.intp)
+
+     @staticmethod
+     def _box_iou_one_to_many(box: np.ndarray, boxes: np.ndarray) -> np.ndarray:
+         xx1 = np.maximum(box[0], boxes[:, 0])
+         yy1 = np.maximum(box[1], boxes[:, 1])
+         xx2 = np.minimum(box[2], boxes[:, 2])
+         yy2 = np.minimum(box[3], boxes[:, 3])
+
+         inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+
+         area_a = max(0.0, (box[2] - box[0]) * (box[3] - box[1]))
+         area_b = np.maximum(0.0, boxes[:, 2] - boxes[:, 0]) * np.maximum(0.0, boxes[:, 3] - boxes[:, 1])
+
+         return inter / (area_a + area_b - inter + 1e-7)
+
+     def _filter_sane_boxes(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         orig_size: tuple[int, int],
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         if len(boxes) == 0:
+             return boxes, scores, cls_ids
+
+         orig_w, orig_h = orig_size
+         image_area = float(orig_w * orig_h)
+
+         keep = []
+         for i, box in enumerate(boxes):
+             x1, y1, x2, y2 = box.tolist()
+             bw = x2 - x1
+             bh = y2 - y1
+
+             if bw <= 0 or bh <= 0:
+                 continue
+             if bw < self.min_w or bh < self.min_h:
+                 continue
+
+             area = bw * bh
+             if area < self.min_box_area:
+                 continue
+             if area > self.max_box_area_ratio * image_area:
+                 continue
+
+             ar = max(bw / max(bh, 1e-6), bh / max(bw, 1e-6))
+             if ar > self.max_aspect_ratio:
+                 continue
+
+             keep.append(i)
+
+         if not keep:
+             return (
+                 np.empty((0, 4), dtype=np.float32),
+                 np.empty((0,), dtype=np.float32),
+                 np.empty((0,), dtype=np.int32),
+             )
+
+         keep = np.array(keep, dtype=np.intp)
+         return boxes[keep], scores[keep], cls_ids[keep]
+
+     def _decode_final_dets(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if preds.ndim == 3 and preds.shape[0] == 1:
+             preds = preds[0]
+
+         if preds.ndim != 2 or preds.shape[1] < 6:
+             raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")
+
+         boxes = preds[:, :4].astype(np.float32)
+         scores = preds[:, 4].astype(np.float32)
+         cls_ids = preds[:, 5].astype(np.int32)
+
+         # person only
+         keep = cls_ids == 0
+         boxes = boxes[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         # candidate threshold
+         keep = scores >= self.conf_thres
+         boxes = boxes[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes) == 0:
+             return []
+
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
+         if len(boxes) == 0:
+             return []
+
+         keep_idx = self._hard_nms(boxes, scores, self.iou_thres)
+         keep_idx = keep_idx[: self.max_det]
+
+         boxes = boxes[keep_idx]
+         scores = scores[keep_idx]
+         cls_ids = cls_ids[keep_idx]
+
+         return [
+             BoundingBox(
+                 x1=int(math.floor(box[0])),
+                 y1=int(math.floor(box[1])),
+                 x2=int(math.ceil(box[2])),
+                 y2=int(math.ceil(box[3])),
+                 cls_id=int(cls_id),
+                 conf=float(conf),
+             )
+             for box, conf, cls_id in zip(boxes, scores, cls_ids)
+             if box[2] > box[0] and box[3] > box[1]
+         ]
+
+     def _decode_raw_yolo(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if preds.ndim != 3:
+             raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
+         if preds.shape[0] != 1:
+             raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")
+
+         preds = preds[0]
+
+         # Normalize to [N, C]
+         if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
+             preds = preds.T
+
+         if preds.ndim != 2 or preds.shape[1] < 5:
+             raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")
+
+         boxes_xywh = preds[:, :4].astype(np.float32)
+         tail = preds[:, 4:].astype(np.float32)
+
+         # Supports:
+         #   [x,y,w,h,score]              single-class
+         #   [x,y,w,h,obj,cls]            YOLO standard single-class
+         #   [x,y,w,h,obj,cls1,cls2,...]  multi-class
+         if tail.shape[1] == 1:
+             scores = tail[:, 0]
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         elif tail.shape[1] == 2:
+             obj = tail[:, 0]
+             cls_prob = tail[:, 1]
+             scores = obj * cls_prob
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         else:
+             obj = tail[:, 0]
+             class_probs = tail[:, 1:]
+             cls_ids = np.argmax(class_probs, axis=1).astype(np.int32)
+             cls_scores = class_probs[np.arange(len(class_probs)), cls_ids]
+             scores = obj * cls_scores
+
+         keep = cls_ids == 0
+         boxes_xywh = boxes_xywh[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         keep = scores >= self.conf_thres
+         boxes_xywh = boxes_xywh[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes_xywh) == 0:
+             return []
+
+         boxes = self._xywh_to_xyxy(boxes_xywh)
+
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
+         if len(boxes) == 0:
+             return []
+
+         keep_idx = self._hard_nms(boxes, scores, self.iou_thres)
+         keep_idx = keep_idx[: self.max_det]
+
+         boxes = boxes[keep_idx]
+         scores = scores[keep_idx]
+         cls_ids = cls_ids[keep_idx]
+
+         return [
+             BoundingBox(
+                 x1=int(math.floor(box[0])),
+                 y1=int(math.floor(box[1])),
+                 x2=int(math.ceil(box[2])),
+                 y2=int(math.ceil(box[3])),
+                 cls_id=int(cls_id),
+                 conf=float(conf),
+             )
+             for box, conf, cls_id in zip(boxes, scores, cls_ids)
+             if box[2] > box[0] and box[3] > box[1]
+         ]
+
+     def _postprocess(
+         self,
+         output: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if output.ndim == 2 and output.shape[1] >= 6:
+             return self._decode_final_dets(output, ratio, pad, orig_size)
+
+         if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] >= 6:
+             return self._decode_final_dets(output, ratio, pad, orig_size)
+
+         return self._decode_raw_yolo(output, ratio, pad, orig_size)
+
+     def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
+         if image is None:
+             raise ValueError("Input image is None")
+         if not isinstance(image, np.ndarray):
+             raise TypeError(f"Input is not a numpy array: {type(image)}")
+         if image.ndim != 3:
+             raise ValueError(f"Expected HWC image, got shape={image.shape}")
+         if image.shape[0] <= 0 or image.shape[1] <= 0:
+             raise ValueError(f"Invalid image shape={image.shape}")
+         if image.shape[2] != 3:
+             raise ValueError(f"Expected 3 channels, got shape={image.shape}")
+
+         if image.dtype != np.uint8:
+             image = image.astype(np.uint8)
+
+         input_tensor, ratio, pad, orig_size = self._preprocess(image)
+
+         expected_shape = (1, 3, self.input_height, self.input_width)
+         if input_tensor.shape != expected_shape:
+             raise ValueError(
+                 f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
+             )
+
+         outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
+         det_output = outputs[0]
+         return self._postprocess(det_output, ratio, pad, orig_size)
+
+     def _merge_tta_consensus(
+         self,
+         boxes_orig: list[BoundingBox],
+         boxes_flip: list[BoundingBox],
+     ) -> list[BoundingBox]:
+         """
+         Keep:
+           - any box with conf >= conf_high
+           - low/medium-conf boxes only if confirmed across TTA views
+         Then run final hard NMS.
+         """
+         if not boxes_orig and not boxes_flip:
+             return []
+
+         coords_o = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_orig], dtype=np.float32) if boxes_orig else np.empty((0, 4), dtype=np.float32)
+         scores_o = np.array([b.conf for b in boxes_orig], dtype=np.float32) if boxes_orig else np.empty((0,), dtype=np.float32)
+
+         coords_f = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_flip], dtype=np.float32) if boxes_flip else np.empty((0, 4), dtype=np.float32)
+         scores_f = np.array([b.conf for b in boxes_flip], dtype=np.float32) if boxes_flip else np.empty((0,), dtype=np.float32)
+
+         accepted_boxes = []
+         accepted_scores = []
+
+         # Original-view candidates
+         for i in range(len(coords_o)):
+             score = scores_o[i]
+             if score >= self.conf_high:
+                 accepted_boxes.append(coords_o[i])
+                 accepted_scores.append(score)
+             elif len(coords_f) > 0:
+                 ious = self._box_iou_one_to_many(coords_o[i], coords_f)
+                 j = int(np.argmax(ious))
+                 if ious[j] >= self.tta_match_iou:
+                     fused_score = max(score, scores_f[j])
+                     accepted_boxes.append(coords_o[i])
+                     accepted_scores.append(fused_score)
+
+         # Flipped-view high-confidence boxes that the original view missed
+         for i in range(len(coords_f)):
+             score = scores_f[i]
+             if score < self.conf_high:
+                 continue
+
+             if len(coords_o) == 0:
+                 accepted_boxes.append(coords_f[i])
+                 accepted_scores.append(score)
+                 continue
+
+             ious = self._box_iou_one_to_many(coords_f[i], coords_o)
+             if np.max(ious) < self.tta_match_iou:
+                 accepted_boxes.append(coords_f[i])
+                 accepted_scores.append(score)
+
+         if not accepted_boxes:
+             return []
+
+         boxes = np.array(accepted_boxes, dtype=np.float32)
+         scores = np.array(accepted_scores, dtype=np.float32)
+
+         keep = self._hard_nms(boxes, scores, self.iou_thres)
+         keep = keep[: self.max_det]
+
+         out = []
+         for idx in keep:
+             x1, y1, x2, y2 = boxes[idx].tolist()
+             out.append(
+                 BoundingBox(
+                     x1=int(math.floor(x1)),
+                     y1=int(math.floor(y1)),
+                     x2=int(math.ceil(x2)),
+                     y2=int(math.ceil(y2)),
+                     cls_id=0,
+                     conf=float(scores[idx]),
+                 )
+             )
+         return out
+
+     def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
+         boxes_orig = self._predict_single(image)
+
+         flipped = cv2.flip(image, 1)
+         boxes_flip_raw = self._predict_single(flipped)
+
+         w = image.shape[1]
+         boxes_flip = [
+             BoundingBox(
+                 x1=w - b.x2,
+                 y1=b.y1,
+                 x2=w - b.x1,
+                 y2=b.y2,
+                 cls_id=b.cls_id,
+                 conf=b.conf,
+             )
+             for b in boxes_flip_raw
+         ]
+
+         return self._merge_tta_consensus(boxes_orig, boxes_flip)
+
+     def _adaptive_conf_threshold(self, n_raw: int) -> float:
+         """
+         Dynamic confidence threshold based on raw detection count.
+
+         total_score = mAP50 * 0.65 + FP_score * 0.35
+           - Few objects → each TP worth ~0.065/n for mAP50 → keep conf low (maximize recall)
+           - Many objects → each TP worth little, FPs dominate → raise conf (minimize FPs)
+         """
+         if n_raw <= self.count_low:
+             return self.conf_adapt_low
+         if n_raw >= self.count_high:
+             return self.conf_adapt_high
+         t = (n_raw - self.count_low) / (self.count_high - self.count_low)
+         return self.conf_adapt_low + t * (self.conf_adapt_high - self.conf_adapt_low)
+
+     def _apply_adaptive_filter(self, boxes: list[BoundingBox]) -> list[BoundingBox]:
+         if not boxes:
+             return boxes
+         n_raw = len(boxes)
+         thresh = self._adaptive_conf_threshold(n_raw)
+         return [b for b in boxes if b.conf >= thresh]
+
+     def predict_batch(
+         self,
+         batch_images: list[ndarray],
+         offset: int,
+         n_keypoints: int,
+     ) -> list[TVFrameResult]:
+         results: list[TVFrameResult] = []
+
+         for frame_number_in_batch, image in enumerate(batch_images):
+             try:
+                 if self.use_tta:
+                     boxes = self._predict_tta(image)
+                 else:
+                     boxes = self._predict_single(image)
+                     boxes = self._apply_adaptive_filter(boxes)
+             except Exception as e:
+                 print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
+                 boxes = []
+
+             results.append(
+                 TVFrameResult(
+                     frame_id=offset + frame_number_in_batch,
+                     boxes=boxes,
+                     keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
+                 )
+             )
+
+         return results
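
The threshold comments in `Miner.__init__` quote a crossover near 1.9 ground truths per image. Under the stated scoring rule that figure checks out with simple arithmetic; the sketch below is a back-of-the-envelope verification, where the per-TP mAP50 gain is modeled roughly as 1/N_GT (an assumption, not the benchmark's exact math):

```python
# Back-of-the-envelope check of the '~1.9 GT/image' crossover in miner.py.
# Scoring rule from the comments:
#   total_score = mAP50 * 0.65 + FP_score * 0.35
#   FP_score    = max(0, 1 - n_FP / n_images), typically n_images ≈ 10
n_images = 10
fp_cost = 0.35 / n_images  # each extra false positive costs 0.035 of total

# Rough model: one extra true positive lifts recall, hence mAP50, by about
# 1 / N_GT, so it is worth roughly 0.65 / N_GT of total score.
# Crossover where a marginal detection is score-neutral:
#   0.65 / N_GT == fp_cost  ->  N_GT == 0.65 / fp_cost
n_gt_total = 0.65 / fp_cost
print(n_gt_total)             # ~18.6 ground truths across the benchmark
print(n_gt_total / n_images)  # ~1.86 GT/image, matching the quoted ~1.9
```

Below that crossover a borderline detection gains more than it risks, which is why the adaptive curve drops toward `conf_adapt_low` when the raw count is small.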
weights.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:07de8ad467ec6807d2833815ff4d02855ecfd35915609b2f0734f58aeb0ceb25
+ size 19405530
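
Taken together, the four files are self-contained. A hedged usage sketch: the repo id below is a placeholder, `snapshot_download` is assumed to materialize the real `weights.onnx` behind the LFS pointer above, and `miner.py` is assumed importable from the snapshot directory:

```python
# Usage sketch for Miner; the repo id is a placeholder.
from pathlib import Path

import cv2
from huggingface_hub import snapshot_download

from miner import Miner

repo_dir = Path(snapshot_download("alfred8995/person-detector"))  # placeholder id
miner = Miner(repo_dir)  # loads repo_dir / 'weights.onnx' into an ORT session

frame = cv2.imread("frame.jpg")  # BGR, HWC, uint8: the layout _preprocess expects
results = miner.predict_batch([frame], offset=0, n_keypoints=0)
for box in results[0].boxes:
    print(box.cls_id, round(box.conf, 3), (box.x1, box.y1, box.x2, box.y2))
```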