meaculpitt committed on
Commit
68c8d9e
·
verified ·
1 Parent(s): 86e7984

scorevision: push artifact

Browse files
Files changed (1) hide show
  1. miner.py +103 -5
miner.py CHANGED
@@ -52,6 +52,104 @@ def _preload_cuda_libs():
52
 
53
  _preload_cuda_libs()
54
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  from pathlib import Path
57
  import math
@@ -75,15 +173,15 @@ logger = logging.getLogger(__name__)
75
  VEH_MODEL_TO_OUT: dict[int, int] = {0: 1, 1: 0, 2: 2, 3: 3}
76
  VEH_NUM_CLASSES = 4
77
  # VEH_IMG_SIZE: now read dynamically from model input shape in __init__
78
- VEH_CONF_PER_CLASS = {0: 0.33, 1: 0.50, 2: 0.40, 3: 0.36}
79
  VEH_CONF_DEFAULT = 0.35
80
- VEH_TTA_CONF = 0.25
81
- VEH_WBF_IOU = 0.55
82
 
83
  # ── Person config ───────────────────────────────────────────────────────────
84
- PER_CONF = 0.35
85
  PER_TTA_CONF = 0.25
86
- PER_WBF_IOU = 0.45
87
 
88
  # ── Shared ──────────────────────────────────────────────────────────────────
89
  WBF_SKIP_THR = 0.0001
 
52
 
53
  _preload_cuda_libs()
54
 
55
+ import subprocess as _subprocess
56
+ import sys as _sys
57
+
58
+ def _try_gpu_ort():
59
+ """Attempt runtime install of onnxruntime-gpu for CUDA inference."""
60
+ import time as _t
61
+ _t0 = _t.time()
62
+
63
+ # Diagnostic: torch + CUDA
64
+ try:
65
+ import torch
66
+ print(f"[GPU_SETUP] torch={torch.__version__} cuda={torch.cuda.is_available()} "
67
+ f"devices={torch.cuda.device_count()}", flush=True)
68
+ if torch.cuda.is_available():
69
+ print(f"[GPU_SETUP] GPU: {torch.cuda.get_device_name(0)}", flush=True)
70
+ except Exception as e:
71
+ print(f"[GPU_SETUP] torch check failed: {e}", flush=True)
72
+
73
+ # Check current ORT providers
74
+ try:
75
+ import onnxruntime as _ort
76
+ providers = _ort.get_available_providers()
77
+ print(f"[GPU_SETUP] ORT={_ort.__version__} providers={providers}", flush=True)
78
+ if 'CUDAExecutionProvider' in providers:
79
+ print("[GPU_SETUP] CUDAExecutionProvider already available!", flush=True)
80
+ return
81
+ except ImportError:
82
+ print("[GPU_SETUP] onnxruntime not importable", flush=True)
83
+ return
84
+
85
+ # List key packages for diagnostics
86
+ result = _subprocess.run(
87
+ [_sys.executable, '-m', 'pip', 'list', '--format=columns'],
88
+ capture_output=True, text=True, timeout=15
89
+ )
90
+ for pkg in ['onnxruntime', 'torch', 'nvidia-cu', 'ultralytics']:
91
+ for line in result.stdout.splitlines():
92
+ if pkg.lower() in line.lower():
93
+ print(f"[GPU_SETUP] pkg: {line.strip()}", flush=True)
94
+
95
+ print("[GPU_SETUP] Attempting onnxruntime-gpu install...", flush=True)
96
+ try:
97
+ # Uninstall CPU onnxruntime
98
+ r1 = _subprocess.run(
99
+ [_sys.executable, '-m', 'pip', 'uninstall', 'onnxruntime', '-y'],
100
+ capture_output=True, text=True, timeout=30
101
+ )
102
+ print(f"[GPU_SETUP] uninstall rc={r1.returncode}", flush=True)
103
+
104
+ # Install GPU version (no-deps to avoid reinstalling torch etc)
105
+ r2 = _subprocess.run(
106
+ [_sys.executable, '-m', 'pip', 'install', 'onnxruntime-gpu', '--no-deps'],
107
+ capture_output=True, text=True, timeout=180
108
+ )
109
+ print(f"[GPU_SETUP] install rc={r2.returncode}", flush=True)
110
+ if r2.stdout:
111
+ for line in r2.stdout.strip().splitlines()[-3:]:
112
+ print(f"[GPU_SETUP] stdout: {line}", flush=True)
113
+ if r2.stderr:
114
+ for line in r2.stderr.strip().splitlines()[-3:]:
115
+ print(f"[GPU_SETUP] stderr: {line}", flush=True)
116
+
117
+ if r2.returncode != 0:
118
+ print("[GPU_SETUP] FAILED β€” reinstalling CPU onnxruntime", flush=True)
119
+ _subprocess.run(
120
+ [_sys.executable, '-m', 'pip', 'install', 'onnxruntime', '--no-deps'],
121
+ capture_output=True, timeout=60
122
+ )
123
+ return
124
+
125
+ # Clear cached onnxruntime modules
126
+ for key in list(_sys.modules.keys()):
127
+ if 'onnxruntime' in key:
128
+ del _sys.modules[key]
129
+
130
+ # Reimport and check
131
+ import onnxruntime as _ort2
132
+ new_providers = _ort2.get_available_providers()
133
+ _dt = _t.time() - _t0
134
+ print(f"[GPU_SETUP] SUCCESS: ORT={_ort2.__version__} providers={new_providers} ({_dt:.1f}s)", flush=True)
135
+
136
+ except Exception as e:
137
+ print(f"[GPU_SETUP] EXCEPTION: {e}", flush=True)
138
+ # Try to restore CPU onnxruntime
139
+ try:
140
+ for key in list(_sys.modules.keys()):
141
+ if 'onnxruntime' in key:
142
+ del _sys.modules[key]
143
+ _subprocess.run(
144
+ [_sys.executable, '-m', 'pip', 'install', 'onnxruntime', '--no-deps'],
145
+ capture_output=True, timeout=60
146
+ )
147
+ print("[GPU_SETUP] Restored CPU onnxruntime", flush=True)
148
+ except Exception:
149
+ pass
150
+
151
+ _try_gpu_ort()
152
+
153
 
154
  from pathlib import Path
155
  import math
 
173
  VEH_MODEL_TO_OUT: dict[int, int] = {0: 1, 1: 0, 2: 2, 3: 3}
174
  VEH_NUM_CLASSES = 4
175
  # VEH_IMG_SIZE: now read dynamically from model input shape in __init__
176
+ VEH_CONF_PER_CLASS = {0: 0.15, 1: 0.30, 2: 0.20, 3: 0.15}
177
  VEH_CONF_DEFAULT = 0.35
178
+ VEH_TTA_CONF = 0.10
179
+ VEH_WBF_IOU = 0.40
180
 
181
  # ── Person config ───────────────────────────────────────────────────────────
182
+ PER_CONF = 0.15
183
  PER_TTA_CONF = 0.25
184
+ PER_WBF_IOU = 0.40
185
 
186
  # ── Shared ──────────────────────────────────────────────────────────────────
187
  WBF_SKIP_THR = 0.0001