iotaminer committed on
Commit aab44f5 · verified · 1 Parent(s): 8e3ff0e

scorevision: push artifact

Files changed (1)
  1. miner.py +175 -692
miner.py CHANGED
@@ -1,692 +1,175 @@
- from pathlib import Path
- import math
-
- import cv2
- import numpy as np
- import onnxruntime as ort
- from numpy import ndarray
- from pydantic import BaseModel
-
-
- class BoundingBox(BaseModel):
-     x1: int
-     y1: int
-     x2: int
-     y2: int
-     cls_id: int
-     conf: float
-
-
- class TVFrameResult(BaseModel):
-     frame_id: int
-     boxes: list[BoundingBox]
-     keypoints: list[tuple[int, int]]
-
-
- class Miner:
-     def __init__(self,
-                  path_hf_repo: Path
-                  ) -> None:
-         model_path = path_hf_repo / "weights.onnx"
-         self.class_names = ['bus', 'car', 'truck', 'motorcycle']
-         model_class_order = ["car", "bus", "truck", "motorcycle"]
-         self.cls_remap = np.array(
-             [self.class_names.index(n) for n in model_class_order], dtype=np.int32
-         )
-         print("ORT version:", ort.__version__)
-
-         try:
-             ort.preload_dlls()
-             print("✅ onnxruntime.preload_dlls() success")
-         except Exception as e:
-             print(f"⚠️ preload_dlls failed: {e}")
-
-         print("ORT available providers BEFORE session:", ort.get_available_providers())
-
-         sess_options = ort.SessionOptions()
-         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
-
-         try:
-             self.session = ort.InferenceSession(
-                 str(model_path),
-                 sess_options=sess_options,
-                 providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
-             )
-             print("✅ Created ORT session with preferred CUDA provider list")
-         except Exception as e:
-             print(f"⚠️ CUDA session creation failed, falling back to CPU: {e}")
-             self.session = ort.InferenceSession(
-                 str(model_path),
-                 sess_options=sess_options,
-                 providers=["CPUExecutionProvider"],
-             )
-
-         print("ORT session providers:", self.session.get_providers())
-
-         for inp in self.session.get_inputs():
-             print("INPUT:", inp.name, inp.shape, inp.type)
-
-         for out in self.session.get_outputs():
-             print("OUTPUT:", out.name, out.shape, out.type)
-
-         self.input_name = self.session.get_inputs()[0].name
-         self.output_names = [output.name for output in self.session.get_outputs()]
-         self.input_shape = self.session.get_inputs()[0].shape
-
-         # Your export is fixed-size 1280, but we still read actual ONNX input shape first.
-         self.input_height = self._safe_dim(self.input_shape[2], default=960)
-         self.input_width = self._safe_dim(self.input_shape[3], default=960)
-
-         # Tuned for validator scoring: reduce FP (FALSE_POSITIVE pillar),
-         # preserve recall (MAP50, RECALL), improve precision.
-         self.conf_thres = 0.3  # Higher = fewer FP, slightly lower recall
-         self.iou_thres = 0.5  # Lower = suppress duplicate detections (FP)
-         self.max_det = 150  # Cap detections per image
-         self.use_tta = True
-
-         # Box sanity: filter tiny/spurious detections (common FP source)
-         self.min_box_area = 14 * 14  # ~144 px²
-         self.min_side = 8
-         self.max_aspect_ratio = 8.0
-
-         print(f"✅ ONNX model loaded from: {model_path}")
-         print(f"✅ ONNX providers: {self.session.get_providers()}")
-         print(f"✅ ONNX input: name={self.input_name}, shape={self.input_shape}")
-
-     def __repr__(self) -> str:
-         return (
-             f"ONNXRuntime(session={type(self.session).__name__}, "
-             f"providers={self.session.get_providers()})"
-         )
-
-     @staticmethod
-     def _safe_dim(value, default: int) -> int:
-         return value if isinstance(value, int) and value > 0 else default
-
-     def _letterbox(
-         self,
-         image: ndarray,
-         new_shape: tuple[int, int],
-         color=(114, 114, 114),
-     ) -> tuple[ndarray, float, tuple[float, float]]:
-         """
-         Resize with unchanged aspect ratio and pad to target shape.
-         Returns:
-             padded_image,
-             ratio,
-             (pad_w, pad_h)  # half-padding
-         """
-         h, w = image.shape[:2]
-         new_w, new_h = new_shape
-
-         ratio = min(new_w / w, new_h / h)
-         resized_w = int(round(w * ratio))
-         resized_h = int(round(h * ratio))
-
-         if (resized_w, resized_h) != (w, h):
-             interp = cv2.INTER_CUBIC if ratio > 1.0 else cv2.INTER_LINEAR
-             image = cv2.resize(image, (resized_w, resized_h), interpolation=interp)
-
-         dw = new_w - resized_w
-         dh = new_h - resized_h
-         dw /= 2.0
-         dh /= 2.0
-
-         left = int(round(dw - 0.1))
-         right = int(round(dw + 0.1))
-         top = int(round(dh - 0.1))
-         bottom = int(round(dh + 0.1))
-
-         padded = cv2.copyMakeBorder(
-             image,
-             top,
-             bottom,
-             left,
-             right,
-             borderType=cv2.BORDER_CONSTANT,
-             value=color,
-         )
-         return padded, ratio, (dw, dh)
-
-     def _preprocess(
-         self, image: ndarray
-     ) -> tuple[np.ndarray, float, tuple[float, float], tuple[int, int]]:
-         """
-         Preprocess for fixed-size ONNX export:
-         - enhance image quality (CLAHE, denoise, sharpen)
-         - letterbox to model input size
-         - BGR -> RGB
-         - normalize to [0,1]
-         - HWC -> NCHW float32
-         """
-         orig_h, orig_w = image.shape[:2]
-
-         img, ratio, pad = self._letterbox(
-             image, (self.input_width, self.input_height)
-         )
-         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-         img = img.astype(np.float32) / 255.0
-         img = np.transpose(img, (2, 0, 1))[None, ...]
-         img = np.ascontiguousarray(img, dtype=np.float32)
-
-         return img, ratio, pad, (orig_w, orig_h)
-
-     @staticmethod
-     def _clip_boxes(boxes: np.ndarray, image_size: tuple[int, int]) -> np.ndarray:
-         w, h = image_size
-         boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
-         boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
-         boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
-         boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
-         return boxes
-
-     @staticmethod
-     def _xywh_to_xyxy(boxes: np.ndarray) -> np.ndarray:
-         out = np.empty_like(boxes)
-         out[:, 0] = boxes[:, 0] - boxes[:, 2] / 2.0
-         out[:, 1] = boxes[:, 1] - boxes[:, 3] / 2.0
-         out[:, 2] = boxes[:, 0] + boxes[:, 2] / 2.0
-         out[:, 3] = boxes[:, 1] + boxes[:, 3] / 2.0
-         return out
-
-     def _soft_nms(
-         self,
-         boxes: np.ndarray,
-         scores: np.ndarray,
-         sigma: float = 0.5,
-         score_thresh: float = 0.01,
-     ) -> tuple[np.ndarray, np.ndarray]:
-         """
-         Soft-NMS: Gaussian decay of overlapping scores instead of hard removal.
-         Returns (kept_original_indices, updated_scores).
-         """
-         N = len(boxes)
-         if N == 0:
-             return np.array([], dtype=np.intp), np.array([], dtype=np.float32)
-
-         boxes = boxes.astype(np.float32, copy=True)
-         scores = scores.astype(np.float32, copy=True)
-         order = np.arange(N)
-
-         for i in range(N):
-             max_pos = i + int(np.argmax(scores[i:]))
-             boxes[[i, max_pos]] = boxes[[max_pos, i]]
-             scores[[i, max_pos]] = scores[[max_pos, i]]
-             order[[i, max_pos]] = order[[max_pos, i]]
-
-             if i + 1 >= N:
-                 break
-
-             xx1 = np.maximum(boxes[i, 0], boxes[i + 1:, 0])
-             yy1 = np.maximum(boxes[i, 1], boxes[i + 1:, 1])
-             xx2 = np.minimum(boxes[i, 2], boxes[i + 1:, 2])
-             yy2 = np.minimum(boxes[i, 3], boxes[i + 1:, 3])
-             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
-
-             area_i = max(0.0, float(
-                 (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
-             ))
-             areas_j = (
-                 np.maximum(0.0, boxes[i + 1:, 2] - boxes[i + 1:, 0])
-                 * np.maximum(0.0, boxes[i + 1:, 3] - boxes[i + 1:, 1])
-             )
-             iou = inter / (area_i + areas_j - inter + 1e-7)
-             scores[i + 1:] *= np.exp(-(iou ** 2) / sigma)
-
-         mask = scores > score_thresh
-         return order[mask], scores[mask]
-
-     @staticmethod
-     def _hard_nms(
-         boxes: np.ndarray,
-         scores: np.ndarray,
-         iou_thresh: float,
-     ) -> np.ndarray:
-         """
-         Standard NMS: keep one box per overlapping cluster (the one with highest score).
-         Returns indices of kept boxes (into the boxes/scores arrays).
-         """
-         N = len(boxes)
-         if N == 0:
-             return np.array([], dtype=np.intp)
-         boxes = np.asarray(boxes, dtype=np.float32)
-         scores = np.asarray(scores, dtype=np.float32)
-         order = np.argsort(scores)[::-1]
-         keep: list[int] = []
-         suppressed = np.zeros(N, dtype=bool)
-         for i in range(N):
-             idx = order[i]
-             if suppressed[idx]:
-                 continue
-             keep.append(idx)
-             bi = boxes[idx]
-             for k in range(i + 1, N):
-                 jdx = order[k]
-                 if suppressed[jdx]:
-                     continue
-                 bj = boxes[jdx]
-                 xx1 = max(bi[0], bj[0])
-                 yy1 = max(bi[1], bj[1])
-                 xx2 = min(bi[2], bj[2])
-                 yy2 = min(bi[3], bj[3])
-                 inter = max(0.0, xx2 - xx1) * max(0.0, yy2 - yy1)
-                 area_i = (bi[2] - bi[0]) * (bi[3] - bi[1])
-                 area_j = (bj[2] - bj[0]) * (bj[3] - bj[1])
-                 iou = inter / (area_i + area_j - inter + 1e-7)
-                 if iou > iou_thresh:
-                     suppressed[jdx] = True
-         return np.array(keep)
-
-     def _per_class_hard_nms(
-         self,
-         boxes: np.ndarray,
-         scores: np.ndarray,
-         cls_ids: np.ndarray,
-         iou_thresh: float,
-     ) -> np.ndarray:
-         """Hard NMS applied independently per class."""
-         if len(boxes) == 0:
-             return np.array([], dtype=np.intp)
-         all_keep: list[int] = []
-         for c in np.unique(cls_ids):
-             mask = cls_ids == c
-             indices = np.where(mask)[0]
-             keep = self._hard_nms(boxes[mask], scores[mask], iou_thresh)
-             all_keep.extend(indices[keep].tolist())
-         all_keep.sort()
-         return np.array(all_keep, dtype=np.intp)
-
-     def _per_class_soft_nms(
-         self,
-         boxes: np.ndarray,
-         scores: np.ndarray,
-         cls_ids: np.ndarray,
-         sigma: float = 0.5,
-         score_thresh: float = 0.01,
-     ) -> tuple[np.ndarray, np.ndarray]:
-         """Soft NMS applied independently per class."""
-         if len(boxes) == 0:
-             return np.array([], dtype=np.intp), np.array([], dtype=np.float32)
-         all_keep: list[int] = []
-         all_scores: list[float] = []
-         for c in np.unique(cls_ids):
-             mask = cls_ids == c
-             indices = np.where(mask)[0]
-             keep, updated = self._soft_nms(boxes[mask], scores[mask], sigma, score_thresh)
-             for k, s in zip(keep, updated):
-                 all_keep.append(int(indices[k]))
-                 all_scores.append(float(s))
-         if not all_keep:
-             return np.array([], dtype=np.intp), np.array([], dtype=np.float32)
-         return np.array(all_keep, dtype=np.intp), np.array(all_scores, dtype=np.float32)
-
-     def _filter_sane_boxes(
-         self,
-         boxes: np.ndarray,
-         scores: np.ndarray,
-         cls_ids: np.ndarray,
-         orig_size: tuple[int, int],
-     ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
-         """Filter out tiny, degenerate, or implausible boxes (common FP)."""
-         if len(boxes) == 0:
-             return boxes, scores, cls_ids
-         orig_w, orig_h = orig_size
-         image_area = float(orig_w * orig_h)
-         keep = []
-         for i, box in enumerate(boxes):
-             x1, y1, x2, y2 = box.tolist()
-             bw = x2 - x1
-             bh = y2 - y1
-             if bw <= 0 or bh <= 0:
-                 continue
-             if bw < self.min_side or bh < self.min_side:
-                 continue
-             area = bw * bh
-             if area < self.min_box_area:
-                 continue
-             if area > 0.95 * image_area:
-                 continue
-             ar = max(bw / max(bh, 1e-6), bh / max(bw, 1e-6))
-             if ar > self.max_aspect_ratio:
-                 continue
-             keep.append(i)
-         if not keep:
-             return (
-                 np.empty((0, 4), dtype=np.float32),
-                 np.empty((0,), dtype=np.float32),
-                 np.empty((0,), dtype=np.int32),
-             )
-         k = np.array(keep, dtype=np.intp)
-         return boxes[k], scores[k], cls_ids[k]
-
-     @staticmethod
-     def _max_score_per_cluster(
-         coords: np.ndarray,
-         scores: np.ndarray,
-         keep_indices: np.ndarray,
-         iou_thresh: float,
-     ) -> np.ndarray:
-         """
-         For each kept box, return the max original score among itself and any
-         box that overlaps it with IOU >= iou_thresh (so TTA cluster keeps best conf).
-         """
-         n_keep = len(keep_indices)
-         if n_keep == 0:
-             return np.array([], dtype=np.float32)
-         out = np.empty(n_keep, dtype=np.float32)
-         coords = np.asarray(coords, dtype=np.float32)
-         scores = np.asarray(scores, dtype=np.float32)
-         for i in range(n_keep):
-             idx = keep_indices[i]
-             bi = coords[idx]
-             xx1 = np.maximum(bi[0], coords[:, 0])
-             yy1 = np.maximum(bi[1], coords[:, 1])
-             xx2 = np.minimum(bi[2], coords[:, 2])
-             yy2 = np.minimum(bi[3], coords[:, 3])
-             inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
-             area_i = (bi[2] - bi[0]) * (bi[3] - bi[1])
-             areas_j = (coords[:, 2] - coords[:, 0]) * (coords[:, 3] - coords[:, 1])
-             iou = inter / (area_i + areas_j - inter + 1e-7)
-             in_cluster = iou >= iou_thresh
-             out[i] = float(np.max(scores[in_cluster]))
-         return out
-
-     def _decode_final_dets(
-         self,
-         preds: np.ndarray,
-         ratio: float,
-         pad: tuple[float, float],
-         orig_size: tuple[int, int],
-         apply_optional_dedup: bool = False,
-     ) -> list[BoundingBox]:
-         """
-         Primary path:
-             expected output rows like [x1, y1, x2, y2, conf, cls_id]
-             in letterboxed input coordinates.
-         """
-         if preds.ndim == 3 and preds.shape[0] == 1:
-             preds = preds[0]
-
-         if preds.ndim != 2 or preds.shape[1] < 6:
-             raise ValueError(f"Unexpected ONNX final-det output shape: {preds.shape}")
-
-         boxes = preds[:, :4].astype(np.float32)
-         scores = preds[:, 4].astype(np.float32)
-         cls_ids = preds[:, 5].astype(np.int32)
-         cls_ids = self.cls_remap[cls_ids]
-
-         keep = scores >= self.conf_thres
-         boxes = boxes[keep]
-         scores = scores[keep]
-         cls_ids = cls_ids[keep]
-
-         if len(boxes) == 0:
-             return []
-
-         pad_w, pad_h = pad
-         orig_w, orig_h = orig_size
-
-         # reverse letterbox
-         boxes[:, [0, 2]] -= pad_w
-         boxes[:, [1, 3]] -= pad_h
-         boxes /= ratio
-         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
-
-         # Box sanity filter (reduces FP)
-         boxes, scores, cls_ids = self._filter_sane_boxes(
-             boxes, scores, cls_ids, orig_size
-         )
-         if len(boxes) == 0:
-             return []
-
-         # Per-class NMS to remove duplicates without suppressing across classes
-         if len(boxes) > 1:
-             if apply_optional_dedup:
-                 keep_idx, scores = self._per_class_soft_nms(boxes, scores, cls_ids)
-                 boxes = boxes[keep_idx]
-                 cls_ids = cls_ids[keep_idx]
-             else:
-                 keep_idx = self._per_class_hard_nms(boxes, scores, cls_ids, self.iou_thres)
-                 keep_idx = keep_idx[: self.max_det]
-                 boxes = boxes[keep_idx]
-                 scores = scores[keep_idx]
-                 cls_ids = cls_ids[keep_idx]
-
-         results: list[BoundingBox] = []
-         for box, conf, cls_id in zip(boxes, scores, cls_ids):
-             x1, y1, x2, y2 = box.tolist()
-
-             if x2 <= x1 or y2 <= y1:
-                 continue
-
-             results.append(
-                 BoundingBox(
-                     x1=int(math.floor(x1)),
-                     y1=int(math.floor(y1)),
-                     x2=int(math.ceil(x2)),
-                     y2=int(math.ceil(y2)),
-                     cls_id=int(cls_id),
-                     conf=float(conf),
-                 )
-             )
-
-         return results
-
-     def _decode_raw_yolo(
-         self,
-         preds: np.ndarray,
-         ratio: float,
-         pad: tuple[float, float],
-         orig_size: tuple[int, int],
-     ) -> list[BoundingBox]:
-         """
-         Fallback path for raw YOLO predictions.
-         Supports common layouts:
-         - [1, C, N]
-         - [1, N, C]
-         """
-         if preds.ndim != 3:
-             raise ValueError(f"Unexpected raw ONNX output shape: {preds.shape}")
-
-         if preds.shape[0] != 1:
-             raise ValueError(f"Unexpected batch dimension in raw output: {preds.shape}")
-
-         preds = preds[0]
-
-         # Normalize to [N, C]
-         if preds.shape[0] <= 16 and preds.shape[1] > preds.shape[0]:
-             preds = preds.T
-
-         if preds.ndim != 2 or preds.shape[1] < 5:
-             raise ValueError(f"Unexpected normalized raw output shape: {preds.shape}")
-
-         boxes_xywh = preds[:, :4].astype(np.float32)
-         cls_part = preds[:, 4:].astype(np.float32)
-
-         if cls_part.shape[1] == 1:
-             scores = cls_part[:, 0]
-             cls_ids = np.zeros(len(scores), dtype=np.int32)
-         else:
-             cls_ids = np.argmax(cls_part, axis=1).astype(np.int32)
-             scores = cls_part[np.arange(len(cls_part)), cls_ids]
-             cls_ids = self.cls_remap[cls_ids]
-
-         keep = scores >= self.conf_thres
-         boxes_xywh = boxes_xywh[keep]
-         scores = scores[keep]
-         cls_ids = cls_ids[keep]
-
-         if len(boxes_xywh) == 0:
-             return []
-
-         boxes = self._xywh_to_xyxy(boxes_xywh)
-
-         keep_idx = self._per_class_hard_nms(boxes, scores, cls_ids, self.iou_thres)
-         keep_idx = keep_idx[: self.max_det]
-         boxes = boxes[keep_idx]
-         scores = scores[keep_idx]
-         cls_ids = cls_ids[keep_idx]
-
-         pad_w, pad_h = pad
-         orig_w, orig_h = orig_size
-
-         boxes[:, [0, 2]] -= pad_w
-         boxes[:, [1, 3]] -= pad_h
-         boxes /= ratio
-         boxes = self._clip_boxes(boxes, (orig_w, orig_h))
-
-         boxes, scores, cls_ids = self._filter_sane_boxes(
-             boxes, scores, cls_ids, (orig_w, orig_h)
-         )
-         if len(boxes) == 0:
-             return []
-
-         results: list[BoundingBox] = []
-         for box, conf, cls_id in zip(boxes, scores, cls_ids):
-             x1, y1, x2, y2 = box.tolist()
-
-             if x2 <= x1 or y2 <= y1:
-                 continue
-
-             results.append(
-                 BoundingBox(
-                     x1=int(math.floor(x1)),
-                     y1=int(math.floor(y1)),
-                     x2=int(math.ceil(x2)),
-                     y2=int(math.ceil(y2)),
-                     cls_id=int(cls_id),
-                     conf=float(conf),
-                 )
-             )
-
-         return results
-
-     def _postprocess(
-         self,
-         output: np.ndarray,
-         ratio: float,
-         pad: tuple[float, float],
-         orig_size: tuple[int, int],
-     ) -> list[BoundingBox]:
-         """
-         Prefer final detections first.
-         Fallback to raw decode only if needed.
-         """
-         # final detections: [N,6]
-         if output.ndim == 2 and output.shape[1] >= 6:
-             return self._decode_final_dets(output, ratio, pad, orig_size)
-
-         # final detections: [1,N,6]
-         if output.ndim == 3 and output.shape[0] == 1 and output.shape[2] == 6:
-             return self._decode_final_dets(output, ratio, pad, orig_size)
-
-         # fallback raw decode
-         return self._decode_raw_yolo(output, ratio, pad, orig_size)
-
-     def _predict_single(self, image: np.ndarray) -> list[BoundingBox]:
-         if image is None:
-             raise ValueError("Input image is None")
-         if not isinstance(image, np.ndarray):
-             raise TypeError(f"Input is not numpy array: {type(image)}")
-         if image.ndim != 3:
-             raise ValueError(f"Expected HWC image, got shape={image.shape}")
-         if image.shape[0] <= 0 or image.shape[1] <= 0:
-             raise ValueError(f"Invalid image shape={image.shape}")
-         if image.shape[2] != 3:
-             raise ValueError(f"Expected 3 channels, got shape={image.shape}")
-
-         if image.dtype != np.uint8:
-             image = image.astype(np.uint8)
-
-         input_tensor, ratio, pad, orig_size = self._preprocess(image)
-
-         expected_shape = (1, 3, self.input_height, self.input_width)
-         if input_tensor.shape != expected_shape:
-             raise ValueError(
-                 f"Bad input tensor shape={input_tensor.shape}, expected={expected_shape}"
-             )
-
-         outputs = self.session.run(self.output_names, {self.input_name: input_tensor})
-         det_output = outputs[0]
-         return self._postprocess(det_output, ratio, pad, orig_size)
-
-     def _predict_tta(self, image: np.ndarray) -> list[BoundingBox]:
-         """
-         Horizontal-flip TTA: merge original + flipped via hard NMS.
-         Boost confidence for consensus detections (both views agree) to improve
-         mAP: validator sorts by confidence, so higher conf for TP helps PR curve.
-         """
-         boxes_orig = self._predict_single(image)
-
-         flipped = cv2.flip(image, 1)
-         boxes_flip = self._predict_single(flipped)
-
-         w = image.shape[1]
-         boxes_flip = [
-             BoundingBox(
-                 x1=w - b.x2, y1=b.y1, x2=w - b.x1, y2=b.y2,
-                 cls_id=b.cls_id, conf=b.conf,
-             )
-             for b in boxes_flip
-         ]
-
-         all_boxes = boxes_orig + boxes_flip
-         if len(all_boxes) == 0:
-             return []
-
-         coords = np.array(
-             [[b.x1, b.y1, b.x2, b.y2] for b in all_boxes], dtype=np.float32
-         )
-         scores = np.array([b.conf for b in all_boxes], dtype=np.float32)
-         cls_ids = np.array([b.cls_id for b in all_boxes], dtype=np.int32)
-
-         hard_keep = self._per_class_hard_nms(coords, scores, cls_ids, self.iou_thres)
-         if len(hard_keep) == 0:
-             return []
-
-         hard_keep = hard_keep[: self.max_det]
-
-         # Boost confidence when both views agree (overlapping detections)
-         boosted = self._max_score_per_cluster(
-             coords, scores, hard_keep, self.iou_thres
-         )
-
-         return [
-             BoundingBox(
-                 x1=all_boxes[i].x1,
-                 y1=all_boxes[i].y1,
-                 x2=all_boxes[i].x2,
-                 y2=all_boxes[i].y2,
-                 cls_id=all_boxes[i].cls_id,
-                 conf=float(boosted[j]),
-             )
-             for j, i in enumerate(hard_keep)
-         ]
-
-     def predict_batch(
-         self,
-         batch_images: list[ndarray],
-         offset: int,
-         n_keypoints: int,
-     ) -> list[TVFrameResult]:
-         results: list[TVFrameResult] = []
-
-         for frame_number_in_batch, image in enumerate(batch_images):
-             try:
-                 if self.use_tta:
-                     boxes = self._predict_tta(image)
-                 else:
-                     boxes = self._predict_single(image)
-             except Exception as e:
-                 print(f"⚠️ Inference failed for frame {offset + frame_number_in_batch}: {e}")
-                 boxes = []
-
-             results.append(
-                 TVFrameResult(
-                     frame_id=offset + frame_number_in_batch,
-                     boxes=boxes,
-                     keypoints=[(0, 0) for _ in range(max(0, int(n_keypoints)))],
-                 )
-             )
-
-         return results
+ from pathlib import Path
+ import math
+
+ import cv2
+ import numpy as np
+ import onnxruntime as ort
+ from numpy import ndarray
+ from pydantic import BaseModel
+
+
+ class BoundingBox(BaseModel):
+     x1: int
+     y1: int
+     x2: int
+     y2: int
+     cls_id: int
+     conf: float
+
+
+ class TVFrameResult(BaseModel):
+     frame_id: int
+     boxes: list[BoundingBox]
+     keypoints: list[tuple[int, int]]
+
+
+ class Miner:
+     """
+     Auto-generated by subnet_bridge from a Manako element repo.
+     This miner is intentionally self-contained for chute import restrictions.
+     """
+
+     def __init__(self, path_hf_repo: Path) -> None:
+         self.path_hf_repo = path_hf_repo
+         self.class_names = ['petrol hose', 'petrol pump', 'price board', 'roof canopy']
+         self.session = ort.InferenceSession(
+             str(path_hf_repo / "weights.onnx"),
+             providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
+         )
+         self.input_name = self.session.get_inputs()[0].name
+         input_shape = self.session.get_inputs()[0].shape
+         # expected [N, C, H, W]
+         self.input_h = int(input_shape[2])
+         self.input_w = int(input_shape[3])
+         self.conf_threshold = 0.25
+         self.iou_threshold = 0.45
+
+     def __repr__(self) -> str:
+         return f"ONNX Miner session={type(self.session).__name__} classes={len(self.class_names)}"
+
+     def _preprocess(self, image_bgr: ndarray) -> tuple[np.ndarray, tuple[int, int]]:
+         h, w = image_bgr.shape[:2]
+         rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
+         resized = cv2.resize(rgb, (self.input_w, self.input_h))
+         x = resized.astype(np.float32) / 255.0
+         x = np.transpose(x, (2, 0, 1))[None, ...]
+         return x, (h, w)
+
+     def _normalize_predictions(self, raw: np.ndarray) -> np.ndarray:
+         # Common ultralytics export shapes:
+         # - [1, C, N] where C=4+num_classes
+         # - [1, N, C]
+         pred = raw[0]
+         if pred.ndim != 2:
+             raise ValueError(f"Unexpected prediction shape: {raw.shape}")
+         if pred.shape[0] < pred.shape[1]:
+             pred = pred.transpose(1, 0)
+         return pred
+
+     def _nms(self, dets: list[tuple[float, float, float, float, float, int]]) -> list[tuple[float, float, float, float, float, int]]:
+         if not dets:
+             return []
+
+         boxes = np.array([[d[0], d[1], d[2], d[3]] for d in dets], dtype=np.float32)
+         scores = np.array([d[4] for d in dets], dtype=np.float32)
+         order = scores.argsort()[::-1]
+         keep = []
+
+         while order.size > 0:
+             i = order[0]
+             keep.append(i)
+
+             xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
+             yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
+             xx2 = np.minimum(boxes[i, 2], boxes[order[1:], 2])
+             yy2 = np.minimum(boxes[i, 3], boxes[order[1:], 3])
+
+             w = np.maximum(0.0, xx2 - xx1)
+             h = np.maximum(0.0, yy2 - yy1)
+             inter = w * h
+
+             area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
+             area_rest = (boxes[order[1:], 2] - boxes[order[1:], 0]) * (boxes[order[1:], 3] - boxes[order[1:], 1])
+             union = np.maximum(area_i + area_rest - inter, 1e-6)
+             iou = inter / union
+
+             remaining = np.where(iou <= self.iou_threshold)[0]
+             order = order[remaining + 1]
+
+         return [dets[idx] for idx in keep]
+
+     def _infer_single(self, image_bgr: ndarray) -> list[BoundingBox]:
+         inp, (orig_h, orig_w) = self._preprocess(image_bgr)
+         out = self.session.run(None, {self.input_name: inp})[0]
+         pred = self._normalize_predictions(out)
+
+         if pred.shape[1] < 5:
+             return []
+
+         boxes = pred[:, :4]
+         cls_scores = pred[:, 4:]
+
+         if cls_scores.shape[1] == 0:
+             return []
+
+         cls_ids = np.argmax(cls_scores, axis=1)
+         confs = np.max(cls_scores, axis=1)
+         keep = confs >= self.conf_threshold
+
+         boxes = boxes[keep]
+         confs = confs[keep]
+         cls_ids = cls_ids[keep]
+
+         if boxes.shape[0] == 0:
+             return []
+
+         sx = orig_w / float(self.input_w)
+         sy = orig_h / float(self.input_h)
+
+         dets: list[tuple[float, float, float, float, float, int]] = []
+         for i in range(boxes.shape[0]):
+             cx, cy, bw, bh = boxes[i].tolist()
+             x1 = (cx - bw / 2.0) * sx
+             y1 = (cy - bh / 2.0) * sy
+             x2 = (cx + bw / 2.0) * sx
+             y2 = (cy + bh / 2.0) * sy
+             dets.append((x1, y1, x2, y2, float(confs[i]), int(cls_ids[i])))
+
+         dets = self._nms(dets)
+
+         out_boxes: list[BoundingBox] = []
+         for x1, y1, x2, y2, conf, cls_id in dets:
+             ix1 = max(0, min(orig_w, math.floor(x1)))
+             iy1 = max(0, min(orig_h, math.floor(y1)))
+             ix2 = max(0, min(orig_w, math.ceil(x2)))
+             iy2 = max(0, min(orig_h, math.ceil(y2)))
+             out_boxes.append(
+                 BoundingBox(
+                     x1=ix1,
+                     y1=iy1,
+                     x2=ix2,
+                     y2=iy2,
+                     cls_id=cls_id,
+                     conf=max(0.0, min(1.0, conf)),
+                 )
+             )
+         return out_boxes
+
+     def predict_batch(
+         self,
+         batch_images: list[ndarray],
+         offset: int,
+         n_keypoints: int,
+     ) -> list[TVFrameResult]:
+         results: list[TVFrameResult] = []
+         for idx, image in enumerate(batch_images):
+             boxes = self._infer_single(image)
+             keypoints = [(0, 0) for _ in range(max(0, int(n_keypoints)))]
+             results.append(
+                 TVFrameResult(
+                     frame_id=offset + idx,
+                     boxes=boxes,
+                     keypoints=keypoints,
+                 )
+             )
+         return results
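For reference, a minimal usage sketch of the Miner class pushed in this commit, assuming the repo files (including weights.onnx) have been downloaded to a local directory; the directory name and the sample image file below are hypothetical placeholders, not part of the commit:

# Hypothetical smoke test for the new Miner (paths are placeholders).
from pathlib import Path

import cv2

from miner import Miner

miner = Miner(path_hf_repo=Path("./scorevision-artifact"))  # directory containing weights.onnx
frame = cv2.imread("sample_frame.jpg")                      # any BGR image loaded with OpenCV
results = miner.predict_batch([frame], offset=0, n_keypoints=0)
for result in results:
    print(result.frame_id, len(result.boxes))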