Spaces:
Running
Running
Raj Bhalerao committed on
Commit ·
9fe06be
1
Parent(s): faa7e0c
export options
Browse files
- backend/engine.py +38 -14
- backend/server.py +2 -1
- frontend/vehicles.html +121 -8
backend/engine.py
CHANGED
|
@@ -4,6 +4,7 @@ import tempfile
|
|
| 4 |
import numpy as np
|
| 5 |
import cv2
|
| 6 |
from collections import defaultdict
|
|
|
|
| 7 |
|
| 8 |
|
| 9 |
def _side(p, a, b):
|
|
@@ -25,27 +26,50 @@ _CLR_LINE = (80, 220, 100) # green
|
|
| 25 |
_CLR_TEXT_BG = (30, 30, 30) # dark bg for text
|
| 26 |
|
| 27 |
|
| 28 |
-
def _draw_annotations(frame, boxes, ids, line_pts):
|
| 29 |
"""Draw bounding boxes, track IDs, and counting line on frame in-place."""
|
| 30 |
-
# Counting line
|
| 31 |
-
|
|
|
|
| 32 |
|
| 33 |
-
if boxes is not None and ids is not None:
|
| 34 |
-
for box, obj_id in zip(boxes, ids):
|
| 35 |
x1, y1, x2, y2 = map(int, box)
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
"""
|
| 45 |
Runs YOLO tracking on video. Calls on_frame(update_dict) after each processed frame.
|
| 46 |
line: [[x1,y1], [x2,y2]]
|
| 47 |
save_annotated: if True, writes annotated MP4 with boxes + IDs + counting line
|
|
|
|
| 48 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
cap = cv2.VideoCapture(video_path)
|
| 50 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 51 |
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
|
@@ -135,7 +159,7 @@ def run(model, video_path, line, config, on_frame, save_annotated=False):
|
|
| 135 |
# Write annotated frame (only for frames we already process)
|
| 136 |
if writer is not None:
|
| 137 |
frame = r.orig_img.copy()
|
| 138 |
-
_draw_annotations(frame, cur_boxes, cur_ids, [a, b])
|
| 139 |
writer.write(frame)
|
| 140 |
|
| 141 |
congestion.append(active)
|
|
|
|
| 4 |
import numpy as np
|
| 5 |
import cv2
|
| 6 |
from collections import defaultdict
|
| 7 |
+
from constants import MODEL_CLASSES
|
| 8 |
|
| 9 |
|
| 10 |
def _side(p, a, b):
|
|
|
|
| 26 |
_CLR_TEXT_BG = (30, 30, 30) # dark bg for text
|
| 27 |
|
| 28 |
|
| 29 |
+
def _draw_annotations(frame, boxes, ids, clses, line_pts, options):
|
| 30 |
"""Draw bounding boxes, track IDs, and counting line on frame in-place."""
|
| 31 |
+
# Counting line (Spatial Boundary)
|
| 32 |
+
if options.get("spatial", True):
|
| 33 |
+
cv2.line(frame, tuple(line_pts[0]), tuple(line_pts[1]), _CLR_LINE, 3, cv2.LINE_AA)
|
| 34 |
|
| 35 |
+
if boxes is not None and ids is not None and clses is not None:
|
| 36 |
+
for box, obj_id, cls_idx in zip(boxes, ids, clses):
|
| 37 |
x1, y1, x2, y2 = map(int, box)
|
| 38 |
+
|
| 39 |
+
# Bounding Box
|
| 40 |
+
if options.get("bbox", True):
|
| 41 |
+
cv2.rectangle(frame, (x1, y1), (x2, y2), _CLR_BOX, 2)
|
| 42 |
+
|
| 43 |
+
# Labels
|
| 44 |
+
labels = []
|
| 45 |
+
if options.get("track_id", True):
|
| 46 |
+
labels.append(f"ID:{int(obj_id)}")
|
| 47 |
+
if options.get("class_name", True):
|
| 48 |
+
labels.append(MODEL_CLASSES.get(int(cls_idx), "Unknown"))
|
| 49 |
+
elif options.get("class_id", False):
|
| 50 |
+
labels.append(f"C:{int(cls_idx)}")
|
| 51 |
+
|
| 52 |
+
if labels:
|
| 53 |
+
label_text = " | ".join(labels)
|
| 54 |
+
(tw, th), _ = cv2.getTextSize(label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.4, 1)
|
| 55 |
+
cv2.rectangle(frame, (x1, y1 - th - 6), (x1 + tw + 6, y1), _CLR_TEXT_BG, -1)
|
| 56 |
+
cv2.putText(frame, label_text, (x1 + 3, y1 - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv2.LINE_AA)
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
def run(model, video_path, line, config, on_frame, save_annotated=False, annotated_options=None):
|
| 60 |
"""
|
| 61 |
Runs YOLO tracking on video. Calls on_frame(update_dict) after each processed frame.
|
| 62 |
line: [[x1,y1], [x2,y2]]
|
| 63 |
save_annotated: if True, writes annotated MP4 with boxes + IDs + counting line
|
| 64 |
+
annotated_options: dict of toggleable visual overlays
|
| 65 |
"""
|
| 66 |
+
if annotated_options is None:
|
| 67 |
+
annotated_options = {"bbox": True, "track_id": True, "spatial": True}
|
| 68 |
+
|
| 69 |
+
# Force bbox to True if export is enabled (user requirement)
|
| 70 |
+
if save_annotated:
|
| 71 |
+
annotated_options["bbox"] = True
|
| 72 |
+
|
| 73 |
cap = cv2.VideoCapture(video_path)
|
| 74 |
fps = cap.get(cv2.CAP_PROP_FPS)
|
| 75 |
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
|
|
|
| 159 |
# Write annotated frame (only for frames we already process)
|
| 160 |
if writer is not None:
|
| 161 |
frame = r.orig_img.copy()
|
| 162 |
+
_draw_annotations(frame, cur_boxes, cur_ids, cls if r.boxes.id is not None else None, [a, b], annotated_options)
|
| 163 |
writer.write(frame)
|
| 164 |
|
| 165 |
congestion.append(active)
|
backend/server.py
CHANGED
|
@@ -125,6 +125,7 @@ async def ws_run(ws: WebSocket):
|
|
| 125 |
line = data["line"]
|
| 126 |
cfg = data["config"]
|
| 127 |
save_annotated = data.get("annotated_video", False)
|
|
|
|
| 128 |
report_format = data.get("report_format", "png")
|
| 129 |
|
| 130 |
path = videos.get(video_id)
|
|
@@ -137,7 +138,7 @@ async def ws_run(ws: WebSocket):
|
|
| 137 |
loop.call_soon_threadsafe(queue.put_nowait, update)
|
| 138 |
|
| 139 |
task = loop.run_in_executor(
|
| 140 |
-
None, run, model, path, line, cfg, on_frame, save_annotated
|
| 141 |
)
|
| 142 |
|
| 143 |
try:
|
|
|
|
| 125 |
line = data["line"]
|
| 126 |
cfg = data["config"]
|
| 127 |
save_annotated = data.get("annotated_video", False)
|
| 128 |
+
annotated_options = data.get("annotated_options", {"bbox": True, "track_id": True, "spatial": True})
|
| 129 |
report_format = data.get("report_format", "png")
|
| 130 |
|
| 131 |
path = videos.get(video_id)
|
|
|
|
| 138 |
loop.call_soon_threadsafe(queue.put_nowait, update)
|
| 139 |
|
| 140 |
task = loop.run_in_executor(
|
| 141 |
+
None, run, model, path, line, cfg, on_frame, save_annotated, annotated_options
|
| 142 |
)
|
| 143 |
|
| 144 |
try:
|
frontend/vehicles.html
CHANGED
|
@@ -262,6 +262,68 @@
|
|
| 262 |
#btn-start-processing {
|
| 263 |
font-family: 'Montserrat', sans-serif !important;
|
| 264 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 265 |
</style>
|
| 266 |
</head>
|
| 267 |
|
|
@@ -611,14 +673,34 @@
|
|
| 611 |
<option value="pdf">PDF Document</option>
|
| 612 |
</select>
|
| 613 |
</div>
|
| 614 |
-
<div class="s-row" data-param="annotated">
|
| 615 |
-
<div>
|
| 616 |
-
<div
|
| 617 |
-
|
| 618 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 619 |
</div>
|
| 620 |
-
|
| 621 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 622 |
</div>
|
| 623 |
</div>
|
| 624 |
</div>
|
|
@@ -1121,6 +1203,15 @@
|
|
| 1121 |
const reportFmt = document.getElementById('sv-report').value;
|
| 1122 |
const annotated = document.getElementById('sv-annotated').classList.contains('active');
|
| 1123 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1124 |
// Apply to config
|
| 1125 |
_params.config.imgsz = imgsz;
|
| 1126 |
_params.config.conf = conf;
|
|
@@ -1158,6 +1249,7 @@
|
|
| 1158 |
line: _params.line,
|
| 1159 |
config: _params.config,
|
| 1160 |
annotated_video: annotated,
|
|
|
|
| 1161 |
report_format: reportFmt
|
| 1162 |
}));
|
| 1163 |
};
|
|
@@ -1236,7 +1328,7 @@
|
|
| 1236 |
'class_dominance.pdf': { title: 'Class Dominance', desc: 'Vehicle count by classification type' },
|
| 1237 |
'confidence_dist.png': { title: 'Confidence Distribution', desc: 'Detection confidence histogram' },
|
| 1238 |
'confidence_dist.pdf': { title: 'Confidence Distribution', desc: 'Detection confidence histogram' },
|
| 1239 |
-
'annotated.mp4': { title: 'Annotated Video', desc: 'Video with
|
| 1240 |
};
|
| 1241 |
|
| 1242 |
async function loadReports(videoId) {
|
|
@@ -1293,6 +1385,27 @@
|
|
| 1293 |
});
|
| 1294 |
}
|
| 1295 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1296 |
init();
|
| 1297 |
</script>
|
| 1298 |
</body>
|
|
|
|
| 262 |
#btn-start-processing {
|
| 263 |
font-family: 'Montserrat', sans-serif !important;
|
| 264 |
}
|
| 265 |
+
|
| 266 |
+
/* Chips */
|
| 267 |
+
.chip-container {
|
| 268 |
+
display: flex;
|
| 269 |
+
flex-wrap: wrap;
|
| 270 |
+
gap: 8px;
|
| 271 |
+
margin-top: 12px;
|
| 272 |
+
padding-top: 12px;
|
| 273 |
+
border-top: 1px solid #1a1a1a;
|
| 274 |
+
transition: all 0.3s ease;
|
| 275 |
+
}
|
| 276 |
+
|
| 277 |
+
.chip {
|
| 278 |
+
display: inline-flex;
|
| 279 |
+
align-items: center;
|
| 280 |
+
gap: 6px;
|
| 281 |
+
padding: 6px 14px;
|
| 282 |
+
border-radius: 9999px;
|
| 283 |
+
font-size: 10px;
|
| 284 |
+
font-weight: 700;
|
| 285 |
+
cursor: pointer;
|
| 286 |
+
transition: all 0.2s ease;
|
| 287 |
+
user-select: none;
|
| 288 |
+
border: 1px solid #333333;
|
| 289 |
+
background: rgba(255, 255, 255, 0.03);
|
| 290 |
+
color: #888888;
|
| 291 |
+
}
|
| 292 |
+
|
| 293 |
+
.chip.active {
|
| 294 |
+
background: #ffffff;
|
| 295 |
+
color: #000000;
|
| 296 |
+
border-color: #ffffff;
|
| 297 |
+
}
|
| 298 |
+
|
| 299 |
+
.chip.frozen {
|
| 300 |
+
background: rgba(255, 255, 255, 0.4);
|
| 301 |
+
color: #000000;
|
| 302 |
+
border-color: transparent;
|
| 303 |
+
cursor: default !important;
|
| 304 |
+
pointer-events: none;
|
| 305 |
+
}
|
| 306 |
+
|
| 307 |
+
.chip:hover {
|
| 308 |
+
border-color: #666666;
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
.chip.active:hover {
|
| 312 |
+
background: #eeeeee;
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
.chip i {
|
| 316 |
+
font-size: 9px;
|
| 317 |
+
}
|
| 318 |
+
|
| 319 |
+
.hidden-chip-container {
|
| 320 |
+
height: 0;
|
| 321 |
+
opacity: 0;
|
| 322 |
+
overflow: hidden;
|
| 323 |
+
margin-top: 0;
|
| 324 |
+
padding-top: 0;
|
| 325 |
+
border-top: none;
|
| 326 |
+
}
|
| 327 |
</style>
|
| 328 |
</head>
|
| 329 |
|
|
|
|
| 673 |
<option value="pdf">PDF Document</option>
|
| 674 |
</select>
|
| 675 |
</div>
|
| 676 |
+
<div class="s-row flex-col items-stretch" data-param="annotated">
|
| 677 |
+
<div class="flex items-center justify-between w-full">
|
| 678 |
+
<div>
|
| 679 |
+
<div class="text-xs font-semibold text-slate-700">Export Annotated Video</div>
|
| 680 |
+
<div class="text-[10px] text-slate-400">Layered visual overlays for diagnostic
|
| 681 |
+
analysis</div>
|
| 682 |
+
</div>
|
| 683 |
+
<div class="toggle-track" id="sv-annotated" onclick="toggleExportMaster(this)">
|
| 684 |
+
<div class="toggle-thumb"></div>
|
| 685 |
+
</div>
|
| 686 |
</div>
|
| 687 |
+
|
| 688 |
+
<div id="chip-selector" class="chip-container hidden-chip-container">
|
| 689 |
+
<div class="chip frozen" id="chip-bbox">
|
| 690 |
+
<i class="fa-solid fa-check"></i> Bounding Boxes
|
| 691 |
+
</div>
|
| 692 |
+
<div class="chip active" id="chip-spatial" onclick="toggleChip('spatial')">
|
| 693 |
+
<i class="fa-solid fa-check"></i> Spatial Boundary
|
| 694 |
+
</div>
|
| 695 |
+
<div class="chip active" id="chip-class_id" onclick="toggleChip('class_id')">
|
| 696 |
+
<i class="fa-solid fa-check"></i> Class ID
|
| 697 |
+
</div>
|
| 698 |
+
<div class="chip active" id="chip-class_name" onclick="toggleChip('class_name')">
|
| 699 |
+
<i class="fa-solid fa-check"></i> Class Names
|
| 700 |
+
</div>
|
| 701 |
+
<div class="chip" id="chip-track_id" onclick="toggleChip('track_id')">
|
| 702 |
+
<i class="fa-solid fa-plus"></i> Track IDs
|
| 703 |
+
</div>
|
| 704 |
</div>
|
| 705 |
</div>
|
| 706 |
</div>
|
|
|
|
| 1203 |
const reportFmt = document.getElementById('sv-report').value;
|
| 1204 |
const annotated = document.getElementById('sv-annotated').classList.contains('active');
|
| 1205 |
|
| 1206 |
+
// Annotation Options
|
| 1207 |
+
const annotated_options = {
|
| 1208 |
+
bbox: true, // Always true if export is enabled
|
| 1209 |
+
spatial: document.getElementById('chip-spatial').classList.contains('active'),
|
| 1210 |
+
class_name: document.getElementById('chip-class_name').classList.contains('active'),
|
| 1211 |
+
class_id: document.getElementById('chip-class_id').classList.contains('active'),
|
| 1212 |
+
track_id: document.getElementById('chip-track_id').classList.contains('active')
|
| 1213 |
+
};
|
| 1214 |
+
|
| 1215 |
// Apply to config
|
| 1216 |
_params.config.imgsz = imgsz;
|
| 1217 |
_params.config.conf = conf;
|
|
|
|
| 1249 |
line: _params.line,
|
| 1250 |
config: _params.config,
|
| 1251 |
annotated_video: annotated,
|
| 1252 |
+
annotated_options: annotated_options,
|
| 1253 |
report_format: reportFmt
|
| 1254 |
}));
|
| 1255 |
};
|
|
|
|
| 1328 |
'class_dominance.pdf': { title: 'Class Dominance', desc: 'Vehicle count by classification type' },
|
| 1329 |
'confidence_dist.png': { title: 'Confidence Distribution', desc: 'Detection confidence histogram' },
|
| 1330 |
'confidence_dist.pdf': { title: 'Confidence Distribution', desc: 'Detection confidence histogram' },
|
| 1331 |
+
'annotated.mp4': { title: 'Annotated Video', desc: 'Video with custom diagnostic overlays (Boxes, Names, IDs, etc.)' },
|
| 1332 |
};
|
| 1333 |
|
| 1334 |
async function loadReports(videoId) {
|
|
|
|
| 1385 |
});
|
| 1386 |
}
|
| 1387 |
|
| 1388 |
+
function toggleExportMaster(el) {
|
| 1389 |
+
el.classList.toggle('active');
|
| 1390 |
+
const chips = document.getElementById('chip-selector');
|
| 1391 |
+
if (el.classList.contains('active')) {
|
| 1392 |
+
chips.classList.remove('hidden-chip-container');
|
| 1393 |
+
} else {
|
| 1394 |
+
chips.classList.add('hidden-chip-container');
|
| 1395 |
+
}
|
| 1396 |
+
}
|
| 1397 |
+
|
| 1398 |
+
function toggleChip(id) {
|
| 1399 |
+
const chip = document.getElementById(`chip-${id}`);
|
| 1400 |
+
chip.classList.toggle('active');
|
| 1401 |
+
const icon = chip.querySelector('i');
|
| 1402 |
+
if (chip.classList.contains('active')) {
|
| 1403 |
+
icon.className = 'fa-solid fa-check';
|
| 1404 |
+
} else {
|
| 1405 |
+
icon.className = 'fa-solid fa-plus';
|
| 1406 |
+
}
|
| 1407 |
+
}
|
| 1408 |
+
|
| 1409 |
init();
|
| 1410 |
</script>
|
| 1411 |
</body>
|