SuperBitDev committed
Commit 77ee914 · verified · 1 Parent(s): 4278c3e

Upload folder using huggingface_hub

Files changed (3)
  1. chute_config.yml +20 -0
  2. miner.py +865 -0
  3. weights.onnx +3 -0
chute_config.yml ADDED
@@ -0,0 +1,20 @@
+ Image:
+   from_base: parachutes/python:3.12
+   run_command:
+     - pip install --upgrade setuptools wheel
+     - pip install 'numpy>=1.23' 'onnxruntime-gpu>=1.16' 'opencv-python>=4.7' 'pillow>=9.5' 'huggingface_hub>=0.19.4' 'pydantic>=2.0' 'pyyaml>=6.0' 'aiohttp>=3.9'
+     - pip install torch==2.8.0 torchvision==0.23.0 torchaudio==2.8.0 --index-url https://download.pytorch.org/whl/cu128
+
+ NodeSelector:
+   gpu_count: 1
+   min_vram_gb_per_gpu: 16
+   include:
+     - pro_6000
+
+ Chute:
+   timeout_seconds: 900
+   concurrency: 4
+   max_instances: 5
+   scaling_threshold: 0.5
+   shutdown_after_seconds: 288000
+   tee: true
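A quick way to sanity-check this config locally is to parse it with pyyaml, which is already pinned in run_command above. This is a hypothetical helper, not part of the commit, and the nesting of the chute schema is assumed from the sections shown:

import yaml

with open("chute_config.yml") as f:
    cfg = yaml.safe_load(f)

# Keys mirror the sections above; the exact chute schema is an assumption.
assert cfg["Image"]["from_base"].startswith("parachutes/python:")
assert cfg["NodeSelector"]["min_vram_gb_per_gpu"] >= 16
assert 0.0 < cfg["Chute"]["scaling_threshold"] <= 1.0
print("chute_config.yml parsed OK:", sorted(cfg))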
miner.py ADDED
@@ -0,0 +1,865 @@
+ from pathlib import Path
+ import math
+ from typing import Any
+
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ from numpy import ndarray
+ from pydantic import BaseModel
+
+ # Profile 002-001: same implementation as miner_script/002/miner.py (crime
+ # classes, defaults, TTA merge, and ONNX sweep-cache helpers). Fork here only
+ # when you need a frozen hyperparameter variant.
+
+
+ class BoundingBox(BaseModel):
+     x1: int
+     y1: int
+     x2: int
+     y2: int
+     cls_id: int
+     conf: float
+
+
+ class TVFrameResult(BaseModel):
+     frame_id: int
+     boxes: list[BoundingBox]
+     keypoints: list[tuple[int, int]]
+
+
+ class Miner:
+     def __init__(self, path_hf_repo: Path) -> None:
+         model_path = path_hf_repo / "weights.onnx"
+
+         # Canonical class indices (validator / PGT / manifest); match dataset.yaml names order.
+         # Crime element (evaluations_7/dataset.yaml):
+         #   balaclava=0, hoodie=1, glove=2, bat=3, spray paint=4, graffiti=5
+         self.class_names = ['balaclava', 'hoodie', 'glove', 'bat', 'spray paint', 'graffiti']
+         # ONNX class index order from training export (Ultralytics names 0..5 in dataset.yaml).
+         model_class_order = ['balaclava', 'bat', 'glove', 'graffiti', 'hoodie', 'spray paint']
+         self._train_cls_to_canonical = np.array(
+             [self.class_names.index(n) for n in model_class_order],
+             dtype=np.int32
+         )
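+         # Worked example (illustrative): with the two orders above,
+         # _train_cls_to_canonical == [0, 3, 2, 5, 1, 4], so raw ONNX class
+         # id 1 ('bat' in the training export) maps to canonical id 3 and
+         # raw id 4 ('hoodie') maps to canonical id 1.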
+         print("ORT version:", ort.__version__)
+
+         try:
+             ort.preload_dlls()
+             print("✅ onnxruntime.preload_dlls() success")
+         except Exception as e:
+             print(f"⚠️ preload_dlls failed: {e}")
+
+         print("ORT available providers BEFORE session:", ort.get_available_providers())
+
+         sess_options = ort.SessionOptions()
+         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+         try:
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+             )
+             print("✅ Created ORT session with preferred CUDA provider list")
+         except Exception as e:
+             print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
+             self.session = ort.InferenceSession(
+                 str(model_path),
+                 sess_options=sess_options,
+                 providers=["CPUExecutionProvider"],
+             )
+
+         print("ORT session providers:", self.session.get_providers())
+
+         for inp in self.session.get_inputs():
+             print("INPUT:", inp.name, inp.shape, inp.type)
+
+         for out in self.session.get_outputs():
+             print("OUTPUT:", out.name, out.shape, out.type)
+
+         self.input_name = self.session.get_inputs()[0].name
+         self.output_names = [output.name for output in self.session.get_outputs()]
+         self.input_shape = self.session.get_inputs()[0].shape
+
+         self.input_height = self._safe_dim(self.input_shape[2], default=1280)
+         self.input_width = self._safe_dim(self.input_shape[3], default=1280)
+
+         # ---------- Scoring-oriented thresholds (crime: balaclava / hoodie / glove /
+         # bat / spray paint / graffiti) ----------
+         # Crime classes mix wearables (balaclava, hoodie), small handhelds
+         # (glove, spray paint), long handhelds (bat) and large markings
+         # (graffiti). Confidence peaks vary widely so we keep a moderate
+         # floor and lean on TTA consensus for the soft tail.
+         self.conf_thres = 0.15
+
+         # Above this on the orig view, accept directly. Below it, require TTA agreement.
+         self.conf_high = 0.32
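+         # Example of the rule (illustrative): an orig-view box at conf 0.40
+         # (>= conf_high) is kept outright; one at conf 0.20 survives only if
+         # a flipped-view box overlaps it with IoU >= tta_match_iou
+         # (see _merge_tta_consensus).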
+
+         # Per-class NMS IoU; balaclavas / hoodies on the same person can
+         # overlap heavily, but they're different classes, so the per-class
+         # NMS keeps both; 0.35 just tightens same-class duplicate
+         # suppression versus the usual 0.50.
+         self.iou_thres = 0.35
+
+         # Balaclavas / hoodies barely shift between orig and h-flipped views,
+         # but graffiti and bats can have asymmetric extents, so a moderate
+         # IoU gate keeps the consensus rule from getting too strict.
+         self.tta_match_iou = 0.15
+
+         self.max_det = 150
+         self.use_tta = True
+
+         # Box sanity filters tuned for crime:
+         #   - smallest classes (glove, spray paint) can shrink to ~10 px on
+         #     the short side in wide shots — keep min thresholds modest
+         #   - aspect ratio: bats are long/thin (h/w ratio up to ~6) and
+         #     graffiti can be very wide; allow up to 8.0
+         #   - a single graffiti tag can fill most of a wall, so allow up to
+         #     ~95% image-area for one box
+         self.min_box_area = 10 * 10
+         self.min_w = 6
+         self.min_h = 6
+         self.max_aspect_ratio = 8.0
+         self.max_box_area_ratio = 0.95
+
+         print(f"✅ ONNX model loaded from: {model_path}")
+         print(f"✅ ONNX providers: {self.session.get_providers()}")
+         print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")
+
+     def __repr__(self) -> str:
+         return (
+             f"ONNXRuntime(session={type(self.session).__name__}, "
+             f"providers={self.session.get_providers()})"
+         )
+
+     @staticmethod
+     def _safe_dim(value, default: int) -> int:
+         return value if isinstance(value, int) and value > 0 else default
+
+     def _remap_train_cls_ids(self, cls_ids: np.ndarray) -> np.ndarray:
+         idx = np.clip(cls_ids.astype(np.int64, copy=False), 0, len(self._train_cls_to_canonical) - 1)
+         return self._train_cls_to_canonical[idx]
+
+     def _letterbox(
+         self,
+         image: ndarray,
+         new_shape: tuple[int, int],
+         color=(114, 114, 114),
+     ) -> tuple[ndarray, float, tuple[float, float]]:
+         h, w = image.shape[:2]
+         new_w, new_h = new_shape
+
+         ratio = min(new_w / w, new_h / h)
+         resized_w = int(round(w * ratio))
+         resized_h = int(round(h * ratio))
+
+         if (resized_w, resized_h) != (w, h):
+             interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
+             image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)
+
+         dw = new_w - resized_w
+         dh = new_h - resized_h
+         dw /= 2.0
+         dh /= 2.0
+
+         left = int(round(dw - 0.1))
+         right = int(round(dw + 0.1))
+         top = int(round(dh - 0.1))
+         bottom = int(round(dh + 0.1))
+
+         padded = cv2.copyMakeBorder(
+             image,
+             top,
+             bottom,
+             left,
+             right,
+             borderType=cv2.BORDER_CONSTANT,
+             value=color,
+         )
+         return padded, ratio, (dw, dh)
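+     # Worked letterbox example (illustrative): fitting a 1920x1080 frame into
+     # a 1280x1280 input gives ratio = min(1280/1920, 1280/1080) = 2/3, a
+     # 1280x720 resize, dw = 0 and dh = 280, i.e. 280 px of gray padding on
+     # the top and bottom; decoding later subtracts (dw, dh) and divides by
+     # ratio to map boxes back to the original frame.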
+     def _preprocess(
+         self, image: ndarray
+     ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
+         orig_h, orig_w = image.shape[:2]
+
+         img, ratio, pad = self._letterbox(
+             image, (self.input_width, self.input_height)
+         )
+         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+         img = img.astype(np.float32) / 255.0
+         img = np.transpose(img, (2, 0, 1))[None, ...]
+         img = np.ascontiguousarray(img, dtype=np.float32)
+
+         return img, ratio, pad, (orig_w, orig_h)
+
+     @staticmethod
+     def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
+         w, h = image_size
+         boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
+         boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
+         boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
+         boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
+         return boxes
+
+     @staticmethod
+     def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
+         out = np.empty_like(boxes)
+         out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
+         out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
+         out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
+         out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
+         return out
+
+     @staticmethod
+     def _hard_nms(
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         iou_thresh: float,
+     ) -> np.ndarray:
+         if len(boxes) == 0:
+             return np.array([], dtype=np.intp)
+
+         boxes = np.asarray(boxes, dtype=np.float32)
+         scores = np.asarray(scores, dtype=np.float32)
+         order = np.argsort(scores)[::-1]
+         keep = []
+
+         while len(order) > 0:
+             i = order[0]
+             keep.append(i)
+             if len(order) == 1:
+                 break
+
+             rest = order[1:]
+
+             xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
+             yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
+             xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
+             yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
+
+             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+
+             area_i = np.maximum(0.0, (boxes[i, 2] - boxes[i, 0])) * np.maximum(0.0, (boxes[i, 3] - boxes[i, 1]))
+             area_r = np.maximum(0.0, (boxes[rest, 2] - boxes[rest, 0])) * np.maximum(0.0, (boxes[rest, 3] - boxes[rest, 1]))
+
+             iou = inter / (area_i + area_r - inter + 1e-7)
+             order = rest[iou <= iou_thresh]
+
+         return np.array(keep, dtype=np.intp)
+
+     @classmethod
+     def _nms_per_class(
+         cls,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         iou_thresh: float,
+         max_det: int,
+     ) -> np.ndarray:
+         """NMS within each class so overlapping predictions of different classes are not merged away."""
+         if len(boxes) == 0:
+             return np.array([], dtype=np.intp)
+         keep_all: list[int] = []
+         for c in np.unique(cls_ids):
+             idxs = np.nonzero(cls_ids == c)[0]
+             if len(idxs) == 0:
+                 continue
+             local_keep = cls._hard_nms(boxes[idxs], scores[idxs], iou_thresh)
+             keep_all.extend(idxs[local_keep].tolist())
+         keep_all = np.array(keep_all, dtype=np.intp)
+         order = np.argsort(scores[keep_all])[::-1]
+         return keep_all[order[:max_det]]
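+     # Illustrative: two boxes with IoU 0.8 but different classes (hoodie vs
+     # balaclava) both survive per-class NMS, while two hoodie boxes with
+     # IoU 0.8 collapse to the higher-confidence one.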
+     @staticmethod
+     def _box_iou_one_to_many(box: np.ndarray, boxes: np.ndarray) -> np.ndarray:
+         xx1 = np.maximum(box[0], boxes[:, 0])
+         yy1 = np.maximum(box[1], boxes[:, 1])
+         xx2 = np.minimum(box[2], boxes[:, 2])
+         yy2 = np.minimum(box[3], boxes[:, 3])
+
+         inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
+
+         area_a = max(0.0, (box[2] - box[0]) * (box[3] - box[1]))
+         area_b = np.maximum(0.0, boxes[:, 2] - boxes[:, 0]) * np.maximum(0.0, boxes[:, 3] - boxes[:, 1])
+
+         return inter / (area_a + area_b - inter + 1e-7)
+
+     def _filter_sane_boxes(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         orig_size: tuple[int, int],
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         if len(boxes) == 0:
+             return boxes, scores, cls_ids
+
+         orig_w, orig_h = orig_size
+         image_area = float(orig_w * orig_h)
+
+         keep = []
+         for i, box in enumerate(boxes):
+             x1, y1, x2, y2 = box.tolist()
+             bw = x2 - x1
+             bh = y2 - y1
+
+             if bw <= 0 or bh <= 0:
+                 continue
+             if bw < self.min_w or bh < self.min_h:
+                 continue
+
+             area = bw * bh
+             if area < self.min_box_area:
+                 continue
+             if area > self.max_box_area_ratio * image_area:
+                 continue
+
+             ar = max(bw / max(bh, 1e-6), bh / max(bw, 1e-6))
+             if ar > self.max_aspect_ratio:
+                 continue
+
+             keep.append(i)
+
+         if not keep:
+             return (
+                 np.empty((0, 4), dtype=np.float32),
+                 np.empty((0,), dtype=np.float32),
+                 np.empty((0,), dtype=np.int32),
+             )
+
+         keep = np.array(keep, dtype=np.intp)
+         return boxes[keep], scores[keep], cls_ids[keep]
+
+     def _decode_final_dets(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if preds.ndim == 3 and preds.shape[0] == 1:
+             preds = preds[0]
+
+         if preds.ndim != 2 or preds.shape[1] < 6:
+             raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")
+
+         boxes = preds[:, :4].astype(np.float32)
+         scores = preds[:, 4].astype(np.float32)
+         cls_ids = self._remap_train_cls_ids(preds[:, 5].astype(np.int32))
+         # Multi-class crime: balaclava / hoodie / glove / bat / spray paint / graffiti
+         # (see self.class_names).
+
+         # candidate threshold
+         keep = scores >= self.conf_thres
+         boxes = boxes[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes) == 0:
+             return []
+
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
+         if len(boxes) == 0:
+             return []
+
+         keep_idx = self._nms_per_class(
+             boxes, scores, cls_ids, self.iou_thres, self.max_det
+         )
+
+         boxes = boxes[keep_idx]
+         scores = scores[keep_idx]
+         cls_ids = cls_ids[keep_idx]
+
+         return [
+             BoundingBox(
+                 x1=int(math.floor(box[0])),
+                 y1=int(math.floor(box[1])),
+                 x2=int(math.ceil(box[2])),
+                 y2=int(math.ceil(box[3])),
+                 cls_id=int(cls_id),
+                 conf=float(conf),
+             )
+             for box, conf, cls_id in zip(boxes, scores, cls_ids)
+             if box[2] > box[0] and box[3] > box[1]
+         ]
+
+     def _decode_raw_yolo(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if preds.ndim != 3:
+             raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
+         if preds.shape[0] != 1:
+             raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")
+
+         preds = preds[0]
+
+         # Normalize to [N, C]
+         if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
+             preds = preds.T
+
+         if preds.ndim != 2 or preds.shape[1] < 5:
+             raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")
+
+         boxes_xywh = preds[:, :4].astype(np.float32)
+         tail = preds[:, 4:].astype(np.float32)
+
+         # Supports:
+         #   [x,y,w,h,score]              single-class
+         #   [x,y,w,h,obj,cls]            YOLO standard single-class
+         #   [x,y,w,h,obj,cls1,cls2,...]  multi-class
+         if tail.shape[1] == 1:
+             scores = tail[:, 0]
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         elif tail.shape[1] == 2:
+             obj = tail[:, 0]
+             cls_prob = tail[:, 1]
+             scores = obj * cls_prob
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         else:
+             obj = tail[:, 0]
+             class_probs = tail[:, 1:]
+             cls_ids = np.argmax(class_probs, axis=1).astype(np.int32)
+             cls_scores = class_probs[np.arange(len(class_probs)), cls_ids]
+             scores = obj * cls_scores
+
+         cls_ids = self._remap_train_cls_ids(cls_ids)
+
+         keep = scores >= self.conf_thres
+         boxes_xywh = boxes_xywh[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+
+         if len(boxes_xywh) == 0:
+             return []
+
+         boxes = self._xywh_to_xyxy(boxes_xywh)
+
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+
+         boxes, scores, cls_ids = self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
+         if len(boxes) == 0:
+             return []
+
+         keep_idx = self._nms_per_class(
+             boxes, scores, cls_ids, self.iou_thres, self.max_det
+         )
+
+         boxes = boxes[keep_idx]
+         scores = scores[keep_idx]
+         cls_ids = cls_ids[keep_idx]
+
+         return [
+             BoundingBox(
+                 x1=int(math.floor(box[0])),
+                 y1=int(math.floor(box[1])),
+                 x2=int(math.ceil(box[2])),
+                 y2=int(math.ceil(box[3])),
+                 cls_id=int(cls_id),
+                 conf=float(conf),
+             )
+             for box, conf, cls_id in zip(boxes, scores, cls_ids)
+             if box[2] > box[0] and box[3] > box[1]
+         ]
+
+     def _postprocess(
+         self,
+         output: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+     ) -> list[BoundingBox]:
+         if output.ndim == 2 and output.shape[1] >= 6:
+             return self._decode_final_dets(output, ratio, pad, orig_size)
+
+         if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] >= 6:
+             return self._decode_final_dets(output, ratio, pad, orig_size)
+
+         return self._decode_raw_yolo(output, ratio, pad, orig_size)
+
+     def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
+         if image is None:
+             raise ValueError("Input image is None")
+         if not isinstance(image, np.ndarray):
+             raise TypeError(f"Input is not numpy array: {type(image)}")
+         if image.ndim != 3:
+             raise ValueError(f"Expected HWC image, got shape={image.shape}")
+         if image.shape[0] <= 0 or image.shape[1] <= 0:
+             raise ValueError(f"Invalid image shape={image.shape}")
+         if image.shape[2] != 3:
+             raise ValueError(f"Expected 3 channels, got shape={image.shape}")
+
+         if image.dtype != np.uint8:
+             image = image.astype(np.uint8)
+
+         input_tensor, ratio, pad, orig_size = self._preprocess(image)
+
+         expected_shape = (1, 3, self.input_height, self.input_width)
+         if input_tensor.shape != expected_shape:
+             raise ValueError(
+                 f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
+             )
+
+         outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
+         det_output = outputs[0]
+         return self._postprocess(det_output, ratio, pad, orig_size)
+
+     def _merge_tta_consensus(
+         self,
+         boxes_orig: list[BoundingBox],
+         boxes_flip: list[BoundingBox],
+         *,
+         conf_high: float | None = None,
+         tta_match_iou: float | None = None,
+         iou_thres: float | None = None,
+     ) -> list[BoundingBox]:
+         """
+         Keep:
+           - any box with conf >= conf_high
+           - low/medium-conf boxes only if confirmed across TTA views
+         Then run final hard NMS.
+         All thresholds default to the instance attributes when not supplied,
+         so the non-sweep path can call this without args. The sweep path
+         passes explicit values to avoid mutating shared state across
+         parameter combinations (and to be safe under any future concurrency).
+         """
+         ch = float(conf_high) if conf_high is not None else float(self.conf_high)
+         tm = float(tta_match_iou) if tta_match_iou is not None else float(self.tta_match_iou)
+         ih = float(iou_thres) if iou_thres is not None else float(self.iou_thres)
+
+         if not boxes_orig and not boxes_flip:
+             return []
+
+         coords_o = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_orig], dtype=np.float32) if boxes_orig else np.empty((0, 4), dtype=np.float32)
+         scores_o = np.array([b.conf for b in boxes_orig], dtype=np.float32) if boxes_orig else np.empty((0,), dtype=np.float32)
+         cls_o = np.array([b.cls_id for b in boxes_orig], dtype=np.int32) if boxes_orig else np.empty((0,), dtype=np.int32)
+
+         coords_f = np.array([[b.x1, b.y1, b.x2, b.y2] for b in boxes_flip], dtype=np.float32) if boxes_flip else np.empty((0, 4), dtype=np.float32)
+         scores_f = np.array([b.conf for b in boxes_flip], dtype=np.float32) if boxes_flip else np.empty((0,), dtype=np.float32)
+         cls_f = np.array([b.cls_id for b in boxes_flip], dtype=np.int32) if boxes_flip else np.empty((0,), dtype=np.int32)
+
+         accepted_boxes = []
+         accepted_scores = []
+         accepted_cls = []
+
+         # Original view candidates
+         for i in range(len(coords_o)):
+             score = scores_o[i]
+             if score >= ch:
+                 accepted_boxes.append(coords_o[i])
+                 accepted_scores.append(score)
+                 accepted_cls.append(int(cls_o[i]))
+             elif len(coords_f) > 0:
+                 ious = self._box_iou_one_to_many(coords_o[i], coords_f)
+                 j = int(np.argmax(ious))
+                 if ious[j] >= tm:
+                     fused_score = max(score, scores_f[j])
+                     accepted_boxes.append(coords_o[i])
+                     accepted_scores.append(fused_score)
+                     accepted_cls.append(int(cls_o[i]))
+
+         # Flipped-view high-confidence boxes that the original view missed
+         for i in range(len(coords_f)):
+             score = scores_f[i]
+             if score < ch:
+                 continue
+
+             if len(coords_o) == 0:
+                 accepted_boxes.append(coords_f[i])
+                 accepted_scores.append(score)
+                 accepted_cls.append(int(cls_f[i]))
+                 continue
+
+             ious = self._box_iou_one_to_many(coords_f[i], coords_o)
+             if np.max(ious) < tm:
+                 accepted_boxes.append(coords_f[i])
+                 accepted_scores.append(score)
+                 accepted_cls.append(int(cls_f[i]))
+
+         if not accepted_boxes:
+             return []
+
+         boxes = np.array(accepted_boxes, dtype=np.float32)
+         scores = np.array(accepted_scores, dtype=np.float32)
+         cls_ids = np.array(accepted_cls, dtype=np.int32)
+
+         keep = self._nms_per_class(boxes, scores, cls_ids, ih, self.max_det)
+
+         out = []
+         for idx in keep:
+             x1, y1, x2, y2 = boxes[idx].tolist()
+             out.append(
+                 BoundingBox(
+                     x1=int(math.floor(x1)),
+                     y1=int(math.floor(y1)),
+                     x2=int(math.ceil(x2)),
+                     y2=int(math.ceil(y2)),
+                     cls_id=int(cls_ids[idx]),
+                     conf=float(scores[idx]),
+                 )
+             )
+         return out
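+     # Illustrative consensus outcomes: an orig box at conf 0.40 is kept
+     # outright; an orig box at conf 0.20 with a flipped-view match at
+     # IoU 0.30 is kept with the max of the two confidences; an orig box at
+     # conf 0.20 with no flipped match is dropped; a flipped-only box at
+     # conf 0.40 with no orig overlap is kept.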
+     def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
+         boxes_orig = self._predict_single(image)
+
+         flipped = cv2.flip(image, 1)
+         boxes_flip_raw = self._predict_single(flipped)
+
+         w = image.shape[1]
+         boxes_flip = [
+             BoundingBox(
+                 x1=w - b.x2,
+                 y1=b.y1,
+                 x2=w - b.x1,
+                 y2=b.y2,
+                 cls_id=b.cls_id,
+                 conf=b.conf,
+             )
+             for b in boxes_flip_raw
+         ]
+
+         return self._merge_tta_consensus(boxes_orig, boxes_flip)
+
+     # --- Fast sweep: two ONNX runs per image, then CPU-only threshold / NMS / TTA merge ---
+     # Must be <= the smallest conf_thres any sweep will try, otherwise the sweep silently
+     # caps the effective threshold and reported "best" params won't reproduce in non-sweep.
+     SWEEP_CONF_FLOOR = 0.0
+
+     def _final_dets_to_arrays_no_nms(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+         score_floor: float,
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         if preds.ndim == 3 and preds.shape[0] == 1:
+             preds = preds[0]
+         if preds.ndim != 2 or preds.shape[1] < 6:
+             raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")
+         boxes = preds[:, :4].astype(np.float32)
+         scores = preds[:, 4].astype(np.float32)
+         cls_ids = self._remap_train_cls_ids(preds[:, 5].astype(np.int32))
+         keep = scores >= float(score_floor)
+         boxes = boxes[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+         if len(boxes) == 0:
+             return (
+                 np.empty((0, 4), dtype=np.float32),
+                 np.empty((0,), dtype=np.float32),
+                 np.empty((0,), dtype=np.int32),
+             )
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+         return self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
+
+     def _raw_yolo_to_arrays_no_nms(
+         self,
+         preds: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+         score_floor: float,
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         if preds.ndim != 3:
+             raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
+         if preds.shape[0] != 1:
+             raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")
+         preds = preds[0]
+         if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
+             preds = preds.T
+         if preds.ndim != 2 or preds.shape[1] < 5:
+             raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")
+         boxes_xywh = preds[:, :4].astype(np.float32)
+         tail = preds[:, 4:].astype(np.float32)
+         if tail.shape[1] == 1:
+             scores = tail[:, 0]
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         elif tail.shape[1] == 2:
+             obj = tail[:, 0]
+             cls_prob = tail[:, 1]
+             scores = obj * cls_prob
+             cls_ids = np.zeros(len(scores), dtype=np.int32)
+         else:
+             obj = tail[:, 0]
+             class_probs = tail[:, 1:]
+             cls_ids = np.argmax(class_probs, axis=1).astype(np.int32)
+             cls_scores = class_probs[np.arange(len(class_probs)), cls_ids]
+             scores = obj * cls_scores
+         cls_ids = self._remap_train_cls_ids(cls_ids)
+         keep = scores >= float(score_floor)
+         boxes_xywh = boxes_xywh[keep]
+         scores = scores[keep]
+         cls_ids = cls_ids[keep]
+         if len(boxes_xywh) == 0:
+             return (
+                 np.empty((0, 4), dtype=np.float32),
+                 np.empty((0,), dtype=np.float32),
+                 np.empty((0,), dtype=np.int32),
+             )
+         boxes = self._xywh_to_xyxy(boxes_xywh)
+         pad_w, pad_h = pad
+         orig_w, orig_h = orig_size
+         boxes[:, [0, 2]] -= pad_w
+         boxes[:, [1, 3]] -= pad_h
+         boxes /= ratio
+         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
+         return self._filter_sane_boxes(boxes, scores, cls_ids, orig_size)
+
+     def _postprocess_to_arrays_no_nms(
+         self,
+         output: np.ndarray,
+         ratio: float,
+         pad: tuple[float, float],
+         orig_size: tuple[int, int],
+         score_floor: float,
+     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
+         if output.ndim == 2 and output.shape[1] >= 6:
+             return self._final_dets_to_arrays_no_nms(output, ratio, pad, orig_size, score_floor)
+         if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] >= 6:
+             return self._final_dets_to_arrays_no_nms(output, ratio, pad, orig_size, score_floor)
+         return self._raw_yolo_to_arrays_no_nms(output, ratio, pad, orig_size, score_floor)
+
+     @staticmethod
+     def _horizontal_flip_boxes_xyxy(boxes: np.ndarray, w: int) -> np.ndarray:
+         if len(boxes) == 0:
+             return boxes
+         out = boxes.astype(np.float32, copy=True)
+         x1 = out[:, 0].copy()
+         x2 = out[:, 2].copy()
+         out[:, 0] = w - x2
+         out[:, 2] = w - x1
+         return out
+
+     def build_vehicle_sweep_cache(self, image_bgr: np.ndarray) -> dict[str, Any]:
+         """Two ONNX forwards (TTA views); candidates kept at score >= SWEEP_CONF_FLOOR."""
+         if image_bgr.dtype != np.uint8:
+             image_bgr = image_bgr.astype(np.uint8)
+         h, w = int(image_bgr.shape[0]), int(image_bgr.shape[1])
+         input_tensor, ratio, pad, orig_size = self._preprocess(image_bgr)
+         outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
+         bo, so, co = self._postprocess_to_arrays_no_nms(
+             outputs[0], ratio, pad, orig_size, self.SWEEP_CONF_FLOOR
+         )
+         flipped = cv2.flip(image_bgr, 1)
+         input_tensor_f, ratio_f, pad_f, orig_size_f = self._preprocess(flipped)
+         outputs_f = self.session.run(self.output_names, {self.input_name: input_tensor_f})
+         bf, sf, cf = self._postprocess_to_arrays_no_nms(
+             outputs_f[0], ratio_f, pad_f, orig_size_f, self.SWEEP_CONF_FLOOR
+         )
+         bf = self._horizontal_flip_boxes_xyxy(bf, w)
+         return {"orig": (bo, so, co), "flip": (bf, sf, cf), "image_shape": (h, w)}
+
+     def _arrays_to_boxes_after_conf_nms(
+         self,
+         boxes: np.ndarray,
+         scores: np.ndarray,
+         cls_ids: np.ndarray,
+         conf_thres: float,
+         iou_thres: float,
+     ) -> list[BoundingBox]:
+         if len(boxes) == 0:
+             return []
+         m = scores >= float(conf_thres)
+         if not np.any(m):
+             return []
+         boxes = boxes[m]
+         scores = scores[m]
+         cls_ids = cls_ids[m]
+         keep_idx = self._nms_per_class(boxes, scores, cls_ids, float(iou_thres), self.max_det)
+         out: list[BoundingBox] = []
+         for i in keep_idx:
+             box = boxes[i]
+             x1, y1, x2, y2 = float(box[0]), float(box[1]), float(box[2]), float(box[3])
+             if x2 <= x1 or y2 <= y1:
+                 continue
+             out.append(
+                 BoundingBox(
+                     x1=int(math.floor(x1)),
+                     y1=int(math.floor(y1)),
+                     x2=int(math.ceil(x2)),
+                     y2=int(math.ceil(y2)),
+                     cls_id=int(cls_ids[i]),
+                     conf=float(scores[i]),
+                 )
+             )
+         return out
+
+     def predict_vehicle_from_sweep_cache(
+         self,
+         cache: dict[str, Any],
+         *,
+         conf_thres: float,
+         iou_thres: float,
+         conf_high: float | None = None,
+         tta_match_iou: float | None = None,
+     ) -> list[BoundingBox]:
+         bo, so, co = cache["orig"]
+         bf, sf, cf = cache["flip"]
+         boxes_orig = self._arrays_to_boxes_after_conf_nms(bo, so, co, conf_thres, iou_thres)
+         # Match predict_batch: single-view path when TTA is off (sweep cache must not force merge).
+         if not getattr(self, "use_tta", True):
+             return boxes_orig
+
+         boxes_flip = self._arrays_to_boxes_after_conf_nms(bf, sf, cf, conf_thres, iou_thres)
+         # Pass swept thresholds explicitly to avoid mutating self.* (race-free
+         # under any future concurrent sweep harness; also robust to early returns).
+         return self._merge_tta_consensus(
+             boxes_orig,
+             boxes_flip,
+             conf_high=conf_high,
+             tta_match_iou=tta_match_iou,
+             iou_thres=iou_thres,
+         )
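+     # Sweep usage sketch (illustrative): build the cache once per image,
+     # then re-score many parameter combinations CPU-only, e.g.:
+     #     cache = miner.build_vehicle_sweep_cache(frame_bgr)
+     #     for ct in (0.10, 0.15, 0.20):
+     #         boxes = miner.predict_vehicle_from_sweep_cache(
+     #             cache, conf_thres=ct, iou_thres=0.35,
+     #             conf_high=0.32, tta_match_iou=0.15,
+     #         )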
+     def predict_batch(
+         self,
+         batch_images: list[ndarray],
+         offset: int,
+         n_keypoints: int,
+     ) -> list[TVFrameResult]:
+         results: list[TVFrameResult] = []
+
+         for frame_number_in_batch, image in enumerate(batch_images):
+             try:
+                 if self.use_tta:
+                     boxes = self._predict_tta(image)
+                 else:
+                     boxes = self._predict_single(image)
+             except Exception as e:
+                 print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
+                 boxes = []
+
+             results.append(
+                 TVFrameResult(
+                     frame_id=offset + frame_number_in_batch,
+                     boxes=boxes,
+                     keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
+                 )
+             )
+
+         return results
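+
+
+ if __name__ == "__main__":
+     # Illustrative local smoke test (hypothetical; the chute runtime never
+     # calls this). Assumes weights.onnx sits in the current directory.
+     dummy = np.zeros((720, 1280, 3), dtype=np.uint8)
+     miner = Miner(Path("."))
+     for result in miner.predict_batch([dummy], offset=0, n_keypoints=0):
+         print(result.frame_id, len(result.boxes))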
weights.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71009e2a8381b151383d0f6d1c87b2d0af7911664a267a30e3e7160143a83f94
+ size 19157269