Rename ap.py to app.py
ap.py
DELETED
@@ -1,1199 +0,0 @@
# app.py
# Layer 1: Color-Metric Logic (LAB Space) → Fail on dark metallic grey
# Layer 2: ResNet Template Matching → Pass only if matches golden template

import gradio as gr
import cv2
import torch
import torch.nn as nn
from torchvision import models, transforms
from PIL import Image
import numpy as np
import pickle
import os
from dataclasses import dataclass
from typing import Tuple, List, Optional

# ─────────────────────────────────────────────────────────────────────────────
# Configuration & Data Classes
# ─────────────────────────────────────────────────────────────────────────────

@dataclass
class ColorThresholds:
    """LAB color space thresholds for saddle surface classification."""
    # Light Grey (PASS): High L, low saturation
    light_grey_L_min: float = 110.0  # Lightness threshold
    light_grey_L_max: float = 200.0
    light_grey_saturation_max: float = 30.0  # Low color saturation

    # Dark Metallic Grey (FAIL): Lower L, slightly higher saturation
    dark_metallic_L_max: float = 105.0  # Darker = defect

    # Detection confidence
    defect_pixel_ratio_threshold: float = 0.15  # 15% dark pixels = fail


@dataclass
class DetectionResult:
    """Result from multi-stage detection."""
    stage: int  # 1 = color check, 2 = template match
    status: str  # "PASS" or "FAIL"
    reason: str
    confidence: float
    saddle_colors: List[str]  # Color of each saddle
    template_match_score: Optional[float] = None


# ─────────────────────────────────────────────────────────────────────────────
# Multi-Stage Detector
# ─────────────────────────────────────────────────────────────────────────────

class MultiStageEngineDetector:
    def __init__(
        self,
        clahe_clip_limit: float = 2.0,
        clahe_tile_grid: tuple = (8, 8),
        color_thresholds: Optional[ColorThresholds] = None,
    ):
        # ── ResNet-50 backbone ─────────────────────────────────────────
        self.model = models.resnet50(weights='IMAGENET1K_V1')
        self.model = nn.Sequential(*list(self.model.children())[:-1])
        self.model.eval()

        # ── CLAHE ──────────────────────────────────────────────────────
        self.clahe = cv2.createCLAHE(
            clipLimit=clahe_clip_limit,
            tileGridSize=clahe_tile_grid,
        )

        # ── ResNet transform ───────────────────────────────────────────
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225],
            )
        ])

        # ── Color thresholds ───────────────────────────────────────────
        self.color_thresholds = color_thresholds or ColorThresholds()

        # ── Templates (golden samples) ─────────────────────────────────
        self.templates = {}
        self.load_templates()

    # ─────────────────────────────────────────────────────────────────────
    # LAYER 1: Color-Metric Logic (LAB Space)
    # ─────────────────────────────────────────────────────────────────────

    def apply_clahe(self, image: np.ndarray) -> np.ndarray:
        """Apply CLAHE preprocessing."""
        bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
        l, a, b = cv2.split(lab)
        l_enh = self.clahe.apply(l)
        lab_enh = cv2.merge([l_enh, a, b])
        bgr_enh = cv2.cvtColor(lab_enh, cv2.COLOR_LAB2BGR)
        return cv2.cvtColor(bgr_enh, cv2.COLOR_BGR2RGB)

    def analyze_full_image_color(self, image: np.ndarray) -> Tuple[str, float, dict]:
        """
        Analyze entire image for color classification in LAB space.
        No region detection - checks overall saddle surface color.

        Returns:
            color_class: "LIGHT_GREY" or "DARK_METALLIC"
            confidence: 0-1 confidence score
            stats: Color statistics
        """
        # Convert to LAB
        bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
        l_channel, a_channel, b_channel = cv2.split(lab)

        # Focus on metallic regions (saddle surfaces)
        # Create mask for bright metallic areas (saddles are lighter than engine block)
        _, saddle_mask = cv2.threshold(l_channel, 80, 255, cv2.THRESH_BINARY)

        # Morphological operations to clean up
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
        saddle_mask = cv2.morphologyEx(saddle_mask, cv2.MORPH_CLOSE, kernel)
        saddle_mask = cv2.morphologyEx(saddle_mask, cv2.MORPH_OPEN, kernel)

        # If no saddle regions detected, analyze full image
        # (count non-zero pixels; summing the 0/255 mask overstated the area 255x)
        if np.count_nonzero(saddle_mask) < 0.05 * saddle_mask.size:
            saddle_mask = np.ones_like(l_channel) * 255

        # Calculate mean L (lightness) in saddle regions
        l_mean = cv2.mean(l_channel, mask=saddle_mask)[0]
        a_mean = cv2.mean(a_channel, mask=saddle_mask)[0]
        b_mean = cv2.mean(b_channel, mask=saddle_mask)[0]

        # Calculate saturation (distance from neutral gray in a-b plane)
        saturation = np.sqrt((a_mean - 128)**2 + (b_mean - 128)**2)

        # Calculate percentage of dark pixels in saddle regions
        dark_pixels = np.sum((l_channel < self.color_thresholds.dark_metallic_L_max) & (saddle_mask > 0))
        total_pixels = np.sum(saddle_mask > 0)
        dark_ratio = dark_pixels / total_pixels if total_pixels > 0 else 0

        # Calculate histogram distribution
        hist = cv2.calcHist([l_channel], [0], saddle_mask, [256], [0, 256])
        hist = hist.flatten() / hist.sum()

        # Check for bimodal distribution (mix of light and dark)
        # Dark peak (0-105) vs Light peak (110-200)
        dark_peak_mass = hist[0:105].sum()
        light_peak_mass = hist[110:200].sum()

        stats = {
            'l_mean': l_mean,
            'a_mean': a_mean,
            'b_mean': b_mean,
            'saturation': saturation,
            'dark_ratio': dark_ratio,
            'dark_peak_mass': dark_peak_mass,
            'light_peak_mass': light_peak_mass,
            'saddle_pixels': total_pixels
        }

        # FAIL conditions (any triggers fail)
        # Condition 1: Mean lightness too low
        if l_mean < self.color_thresholds.dark_metallic_L_max:
            confidence = 1.0 - (l_mean / self.color_thresholds.dark_metallic_L_max)
            return "DARK_METALLIC", confidence, stats

        # Condition 2: High ratio of dark pixels
        if dark_ratio > self.color_thresholds.defect_pixel_ratio_threshold:
            return "DARK_METALLIC", min(dark_ratio * 2, 1.0), stats

        # Condition 3: Significant dark peak in histogram (corrupted surfaces)
        if dark_peak_mass > 0.20:  # More than 20% of pixels in dark range
            return "DARK_METALLIC", dark_peak_mass, stats

        # PASS condition
        if (self.color_thresholds.light_grey_L_min <= l_mean <= self.color_thresholds.light_grey_L_max and
                saturation < self.color_thresholds.light_grey_saturation_max):
            confidence = min(1.0, light_peak_mass + 0.5)  # Higher if concentrated in light range
            return "LIGHT_GREY", confidence, stats

        # Ambiguous - default to LIGHT_GREY with low confidence
        return "LIGHT_GREY", 0.3, stats

    def layer1_color_check(self, image: np.ndarray) -> DetectionResult:
        """
        Layer 1: Full-image color-metric logic.
        FAIL if image contains dark metallic grey surfaces.
        PASS if predominantly light grey.
        """
        # Normalize input
        if isinstance(image, Image.Image):
            image = np.array(image.convert("RGB"))
        elif image.dtype != np.uint8:
            image = np.clip(image, 0, 255).astype(np.uint8)

        # Apply CLAHE preprocessing
        image_enhanced = self.apply_clahe(image)

        # Analyze full image color
        color_class, confidence, stats = self.analyze_full_image_color(image_enhanced)

        # Result
        if color_class == "DARK_METALLIC":
            reason_parts = []
            if stats['l_mean'] < self.color_thresholds.dark_metallic_L_max:
                reason_parts.append(f"Low lightness (L={stats['l_mean']:.1f})")
            if stats['dark_ratio'] > self.color_thresholds.defect_pixel_ratio_threshold:
                reason_parts.append(f"High dark pixel ratio ({stats['dark_ratio']:.1%})")
            if stats['dark_peak_mass'] > 0.20:
                reason_parts.append(f"Dark surface detected ({stats['dark_peak_mass']:.1%})")

            reason = "Dark metallic grey detected: " + ", ".join(reason_parts)

            return DetectionResult(
                stage=1,
                status="FAIL",
                reason=reason,
                confidence=confidence,
                saddle_colors=[color_class]
            )
        else:
            return DetectionResult(
                stage=1,
                status="PASS",
                reason=f"Light grey surface detected (L={stats['l_mean']:.1f}, light_mass={stats['light_peak_mass']:.1%})",
                confidence=confidence,
                saddle_colors=[color_class]
            )

    # ─────────────────────────────────────────────────────────────────────
    # LAYER 2: ResNet Template Matching
    # ─────────────────────────────────────────────────────────────────────

    def extract_features(self, image) -> np.ndarray:
        """Extract ResNet-50 features with CLAHE preprocessing."""
        # Normalize input
        if isinstance(image, Image.Image):
            image = np.array(image.convert("RGB"))
        elif isinstance(image, np.ndarray):
            if image.dtype != np.uint8:
                image = np.clip(image, 0, 255).astype(np.uint8)
            if image.ndim == 2:
                image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)

        # CLAHE + blur
        image = self.apply_clahe(image)
        image = cv2.GaussianBlur(image, (3, 3), 0)

        # ResNet forward pass
        img_tensor = self.transform(Image.fromarray(image)).unsqueeze(0)
        with torch.no_grad():
            features = self.model(img_tensor).squeeze().numpy()

        return features

    @staticmethod
    def cosine_similarity(f1: np.ndarray, f2: np.ndarray) -> float:
        return float(np.dot(f1, f2) / (np.linalg.norm(f1) * np.linalg.norm(f2) + 1e-8))

    def layer2_template_match(self, image, threshold: float = 0.70) -> DetectionResult:
        """
        Layer 2: ResNet template matching against golden samples.
        PASS only if similarity >= threshold.
        FAIL otherwise (even if close).
        """
        if not self.templates:
            return DetectionResult(
                stage=2,
                status="FAIL",
                reason="No golden templates available. Please add templates first.",
                confidence=1.0,
                saddle_colors=[],
                template_match_score=None
            )

        # Extract features
        query_feat = self.extract_features(image)

        # Find best match
        results = [(name, self.cosine_similarity(query_feat, feat))
                   for name, feat in self.templates.items()]
        results.sort(key=lambda x: x[1], reverse=True)

        best_name, best_score = results[0]

        # Strict threshold (no false negatives)
        if best_score >= threshold:
            return DetectionResult(
                stage=2,
                status="PASS",
                reason=f"Matched golden template '{best_name}' with {best_score:.2%} confidence",
                confidence=best_score,
                saddle_colors=[],
                template_match_score=best_score
            )
        else:
            return DetectionResult(
                stage=2,
                status="FAIL",
                reason=f"Best match '{best_name}' ({best_score:.2%}) below threshold ({threshold:.0%}). Surface defects detected.",
                confidence=1.0 - best_score,  # High confidence in failure
                saddle_colors=[],
                template_match_score=best_score
            )

    # ─────────────────────────────────────────────────────────────────────
    # Multi-Stage Detection Pipeline
    # ─────────────────────────────────────────────────────────────────────

    def detect(self, image, template_threshold: float = 0.70) -> Tuple[DetectionResult, str]:
        """
        Full two-stage detection pipeline.

        Returns:
            result: DetectionResult object
            summary: Formatted string for UI
        """
        # STAGE 1: Color check
        layer1_result = self.layer1_color_check(image)

        if layer1_result.status == "FAIL":
            # Immediate fail on color metric
            summary = f"""
## 🔴 DEFECTED PIECE

**Stage 1: Color-Metric Analysis** → FAILED ❌

**Reason**: {layer1_result.reason}

**Analysis Method**: Full-image LAB color space analysis

**Decision**: Part rejected at Stage 1. No template matching performed.

---
**Detection Confidence**: {layer1_result.confidence:.1%}
"""
            return layer1_result, summary

        # STAGE 2: Template matching (only if color check passed)
        layer2_result = self.layer2_template_match(image, template_threshold)

        if layer2_result.status == "PASS":
            summary = f"""
## ✅ PERFECT PIECE

**Stage 1: Color-Metric Analysis** → PASSED ✅
- {layer1_result.reason}

**Stage 2: Template Matching** → PASSED ✅
- {layer2_result.reason}

**Final Decision**: ✅ **APPROVED**

---
**Template Match Score**: {layer2_result.template_match_score:.2%}
**Overall Confidence**: {layer2_result.confidence:.1%}
"""
        else:
            summary = f"""
## 🟡 DEFECTED PIECE

**Stage 1: Color-Metric Analysis** → PASSED ✅
- {layer1_result.reason}

**Stage 2: Template Matching** → FAILED ❌
- {layer2_result.reason}

**Final Decision**: ❌ **REJECTED**

---
**Best Template Match**: {layer2_result.template_match_score:.2%}
**Threshold**: {template_threshold:.0%}
**Confidence in Defect**: {layer2_result.confidence:.1%}
"""

        return layer2_result, summary

    # ─────────────────────────────────────────────────────────────────────
    # Template Management
    # ─────────────────────────────────────────────────────────────────────

    def save_template(self, image, part_name: str) -> str:
        if image is None or not part_name.strip():
            return "⚠️ Please provide both an image and a part name."

        # Verify it's a golden sample (all light grey)
        color_result = self.layer1_color_check(image)
        if color_result.status == "FAIL":
            return f"❌ Cannot save as golden template: {color_result.reason}"

        # Extract and save features
        self.templates[part_name.strip()] = self.extract_features(image)
        with open('templates.pkl', 'wb') as f:
            pickle.dump(self.templates, f)

        return f"✅ Golden template '{part_name.strip()}' saved! ({len(self.templates)} total)"

    def load_templates(self):
        if os.path.exists('templates.pkl'):
            try:
                with open('templates.pkl', 'rb') as f:
                    self.templates = pickle.load(f)
            except Exception:
                self.templates = {}

    def delete_template(self, part_name: str) -> str:
        if part_name in self.templates:
            del self.templates[part_name]
            with open('templates.pkl', 'wb') as f:
                pickle.dump(self.templates, f)
            return f"🗑️ Template '{part_name}' deleted."
        return f"⚠️ Template '{part_name}' not found."


# ─────────────────────────────────────────────────────────────────────────────
# Live Edge Preview
# ─────────────────────────────────────────────────────────────────────────────

def live_edge_preview(frame: np.ndarray) -> np.ndarray:
    """Real-time CLAHE + Canny edge overlay."""
    if frame is None:
        return frame
    if frame.dtype != np.uint8:
        frame = np.clip(frame, 0, 255).astype(np.uint8)

    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enh = clahe.apply(blurred)
    edges = cv2.Canny(enh, 30, 120)

    dim = (frame * 0.35).astype(np.uint8)
    dim[edges > 0] = [0, 220, 220]
    return dim


# ─────────────────────────────────────────────────────────────────────────────
# Gradio Callbacks
# ─────────────────────────────────────────────────────────────────────────────

detector = MultiStageEngineDetector()


def cb_detect(image, threshold):
    """Run two-stage detection."""
    if image is None:
        return "⚠️ Please provide an image.", None

    result, summary = detector.detect(image, template_threshold=threshold)
    label = result.status  # "PASS" or "FAIL"

    return summary, label


def cb_add_template(image, part_name):
    return detector.save_template(image, part_name)


def cb_list_templates():
    if not detector.templates:
        return "No golden templates saved yet."
    lines = [f"- **{n}**" for n in sorted(detector.templates)]
    return f"**{len(detector.templates)} golden template(s):**\n" + "\n".join(lines)


def cb_delete_template(part_name):
    msg = detector.delete_template(part_name)
    return msg, cb_list_templates()


def cb_capture(frame):
    return frame


def cb_list_templates_raw():
    """Return comma-separated list of template names for API."""
    return ",".join(sorted(detector.templates.keys()))


# ─────────────────────────────────────────────────────────────────────────────
# Gradio UI
# ─────────────────────────────────────────────────────────────────────────────

CSS = """
@import url('https://fonts.googleapis.com/css2?family=Share+Tech+Mono&family=Barlow:wght@300;400;600;700&display=swap');
:root {
  --bg: #0d0f12; --surface: #151820; --border: #2a3045;
  --accent: #00e5ff; --accent2: #ff6b35; --success: #00e676; --danger: #ff1744;
  --text: #d0d8e8; --muted: #5a6480;
  --mono: 'Share Tech Mono', monospace; --sans: 'Barlow', sans-serif;
}
body, .gradio-container { background: var(--bg) !important; font-family: var(--sans) !important; color: var(--text) !important; }
.eng-title { font-family: var(--mono); font-size: 1.6rem; color: var(--accent); letter-spacing: 0.1em; text-transform: uppercase; }
button.primary { background: var(--accent) !important; color: #000 !important; font-family: var(--mono) !important; font-weight: 700 !important; }
.gr-markdown { font-family: var(--mono) !important; font-size: 0.8rem !important; background: var(--surface) !important; border: 1px solid var(--border) !important; padding: 14px !important; }
"""

# Custom CSS must be passed to gr.Blocks(); demo.launch() has no css parameter.
with gr.Blocks(title="MULTI-STAGE ENGINE DETECTOR", css=CSS) as demo:

    gr.HTML("""
    <div class='eng-title' style='text-align:center;padding:20px;'>
    ⚙ MULTI-STAGE ENGINE SADDLE DETECTOR
    </div>
    <div style='text-align:center;color:#5a6480;font-family:Share Tech Mono,monospace;font-size:0.75rem;margin-bottom:20px;'>
    Layer 1: Color-Metric Logic (LAB) • Layer 2: ResNet Template Matching • Zero False Negatives
    </div>
    """)

    with gr.Tabs():
        # Tab 1: Detect
        with gr.TabItem("🔍 DETECT"):
            with gr.Row():
                with gr.Column():
                    detect_input = gr.Image(sources=["upload", "webcam"], type="numpy", height=350)
                    # step is keyword-only in current Gradio
                    threshold_slider = gr.Slider(0.60, 0.90, value=0.70, step=0.01, label="Template Match Threshold")
                    detect_btn = gr.Button("▶ RUN 2-STAGE DETECTION", variant="primary")
                with gr.Column():
                    detect_output = gr.Markdown(label="Detection Report")
                    match_label = gr.Label(label="Final Decision")

            detect_btn.click(
                fn=cb_detect,
                inputs=[detect_input, threshold_slider],
                outputs=[detect_output, match_label],
                api_name="detect_part"
            )

        # Tab 2: Live Camera
        with gr.TabItem("📷 LIVE CAMERA"):
            with gr.Row():
                with gr.Column():
                    live_input = gr.Image(sources=["webcam"], streaming=True, type="numpy", height=280)
                    live_output = gr.Image(label="Edge Preview", height=280)
                with gr.Column():
                    captured_frame = gr.Image(type="numpy", height=280, interactive=False)
                    with gr.Row():
                        capture_btn = gr.Button("📸 CAPTURE", variant="primary")
                        detect_live_btn = gr.Button("🔍 DETECT", variant="secondary")
                    live_threshold = gr.Slider(0.60, 0.90, value=0.70, step=0.01, label="Threshold")
                    live_result = gr.Markdown()
                    live_label = gr.Label()

            live_input.stream(fn=live_edge_preview, inputs=[live_input], outputs=[live_output])
            capture_btn.click(fn=cb_capture, inputs=[live_input], outputs=[captured_frame])
            detect_live_btn.click(fn=cb_detect, inputs=[captured_frame, live_threshold], outputs=[live_result, live_label])

        # Tab 3: Add Golden Template
        with gr.TabItem("➕ GOLDEN TEMPLATE"):
            with gr.Row():
                with gr.Column():
                    template_input = gr.Image(sources=["upload", "webcam"], type="numpy", height=300)
                    part_name_input = gr.Textbox(label="Template Name", placeholder="e.g. perfect_sample_1")
                    add_btn = gr.Button("💾 SAVE AS GOLDEN TEMPLATE", variant="primary")
                with gr.Column():
                    add_output = gr.Markdown()
                    gr.HTML("<div style='color:#5a6480;padding:10px;'>⚠️ Only save perfect pieces as golden templates. System verifies all saddles are light grey before saving.</div>")

            add_btn.click(
                fn=cb_add_template,
                inputs=[template_input, part_name_input],
                outputs=[add_output],
                api_name="add_template"
            )

        # Tab 4: Manage Templates
        with gr.TabItem("📋 TEMPLATES"):
            with gr.Row():
                with gr.Column():
                    template_list = gr.Markdown()
                    refresh_btn = gr.Button("♻ REFRESH", variant="secondary")
                with gr.Column():
                    delete_name = gr.Textbox(label="Delete Template", placeholder="exact name")
                    delete_btn = gr.Button("🗑 DELETE", variant="secondary")
                    delete_status = gr.Markdown()

            refresh_btn.click(fn=cb_list_templates, outputs=[template_list], api_name="list_templates")
            delete_btn.click(fn=cb_delete_template, inputs=[delete_name], outputs=[delete_status, template_list])

    # Hidden raw list endpoint for API access
    # (route the return value into a hidden textbox; with outputs=[] the string was dropped)
    raw_list_btn = gr.Button("RAW LIST", visible=False)
    raw_list_out = gr.Textbox(visible=False)
    raw_list_btn.click(fn=cb_list_templates_raw, outputs=[raw_list_out], api_name="list_templates_raw")

    demo.load(fn=cb_list_templates, outputs=[template_list])


if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
# # app.py
# import gradio as gr
# import torch
# import torch.nn as nn
# from torchvision import models, transforms
# from PIL import Image
# import numpy as np
# import pickle
# import os
# import cv2
# from typing import List, Dict, Tuple, Optional
# from dataclasses import dataclass
# from pathlib import Path

# @dataclass
# class TemplateData:
#     """Store multiple feature vectors for robust matching"""
#     features_list: List[np.ndarray]
#     part_name: str
#     num_images: int
#     mean_features: Optional[np.ndarray] = None

#     def __post_init__(self):
#         """Calculate mean features after initialization"""
#         if self.features_list and self.mean_features is None:
#             self.mean_features = np.mean(self.features_list, axis=0)


# class EnginePartDetector:

#     def __init__(
#         self,
#         clahe_clip_limit: float = 9.5,
#         clahe_tile_grid: Tuple[int, int] = (8, 8),
#         device: str = 'cuda' if torch.cuda.is_available() else 'cpu',
#         templates_path: str = 'templates.pkl'
#     ):
#         self.device = device
#         self.templates_path = templates_path

#         # ── ResNet-50 backbone (feature extractor) ────────────────────
#         print(f"Loading ResNet50 on {self.device}...")
#         self.model = models.resnet50(weights='IMAGENET1K_V1')
#         self.model = nn.Sequential(*list(self.model.children())[:-1])
#         self.model.to(self.device)
#         self.model.eval()

#         # ── CLAHE for contrast enhancement ─────────────────────────────
#         self.clahe = cv2.createCLAHE(
#             clipLimit=clahe_clip_limit,
#             tileGridSize=clahe_tile_grid,
#         )

#         # ── Image augmentation for training robustness ─────────────────
#         self.base_transform = transforms.Compose([
#             transforms.Resize((224, 224)),
#             transforms.ToTensor(),
#             transforms.Normalize(
#                 mean=[0.485, 0.456, 0.406],
#                 std=[0.229, 0.224, 0.225],
#             )
#         ])

#         # Augmentation for multi-image templates (simulate lighting variations)
#         self.augment_transforms = [
#             # Original
#             self.base_transform,
#             # Brightness variations
#             transforms.Compose([
#                 transforms.Resize((224, 224)),
#                 transforms.ColorJitter(brightness=0.3),
#                 transforms.ToTensor(),
#                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#             ]),
#             transforms.Compose([
#                 transforms.Resize((224, 224)),
#                 transforms.ColorJitter(brightness=0.5),
#                 transforms.ToTensor(),
#                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#             ]),
#             # Contrast variations
#             transforms.Compose([
#                 transforms.Resize((224, 224)),
#                 transforms.ColorJitter(contrast=0.3),
#                 transforms.ToTensor(),
#                 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#             ]),
#         ]

#         self.templates: Dict[str, TemplateData] = {}
#         self.load_templates()
#         print(f"Loaded {len(self.templates)} templates from disk")

#     # ── Preprocessing Pipeline ─────────────────────────────────────────

#     def apply_clahe(self, image: np.ndarray) -> np.ndarray:
#         """Apply CLAHE to enhance contrast and recover shadow details"""
#         bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
#         lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)

#         l_channel, a_channel, b_channel = cv2.split(lab)
#         l_enhanced = self.clahe.apply(l_channel)

#         lab_enhanced = cv2.merge([l_enhanced, a_channel, b_channel])
#         bgr_enhanced = cv2.cvtColor(lab_enhanced, cv2.COLOR_LAB2BGR)
#         rgb_enhanced = cv2.cvtColor(bgr_enhanced, cv2.COLOR_BGR2RGB)

#         return rgb_enhanced

#     def preprocess_image(self, image: np.ndarray) -> np.ndarray:
#         """Apply CLAHE and mild denoising"""
#         # Ensure uint8 RGB
#         if image.dtype != np.uint8:
#             image = image.astype(np.uint8)

#         # CLAHE for contrast enhancement
#         image = self.apply_clahe(image)

#         # Mild Gaussian blur to reduce metallic sheen noise
#         image = cv2.GaussianBlur(image, (3, 3), 0)

#         return image

#     # ── Feature Extraction ─────────────────────────────────────────────

#     @torch.no_grad()
#     def extract_features(self, image, use_augmentation: bool = False) -> np.ndarray:

#         # Normalize input to numpy uint8 RGB
#         if isinstance(image, Image.Image):
#             image = np.array(image.convert("RGB"))
#         elif isinstance(image, np.ndarray) and image.dtype != np.uint8:
#             image = image.astype(np.uint8)

#         # Apply preprocessing
#         image = self.preprocess_image(image)
#         image_pil = Image.fromarray(image)

#         if use_augmentation:
#             # Extract features with multiple augmentations
#             features_list = []
#             for transform in self.augment_transforms:
#                 img_tensor = transform(image_pil).unsqueeze(0).to(self.device)
#                 features = self.model(img_tensor).squeeze().cpu().numpy()
#                 features_list.append(features)
#             return np.array(features_list)
#         else:
#             # Single feature extraction
#             img_tensor = self.base_transform(image_pil).unsqueeze(0).to(self.device)
#             features = self.model(img_tensor).squeeze().cpu().numpy()
#             return features

#     @torch.no_grad()
#     def extract_batch_features(self, images: List[np.ndarray]) -> List[np.ndarray]:
#         features_list = []

#         # Preprocess all images
#         preprocessed = []
#         for img in images:
#             if isinstance(img, Image.Image):
#                 img = np.array(img.convert("RGB"))
#             img = self.preprocess_image(img)
#             preprocessed.append(Image.fromarray(img))

#         # Batch processing
#         batch_size = 8
#         for i in range(0, len(preprocessed), batch_size):
#             batch = preprocessed[i:i+batch_size]
#             batch_tensors = torch.stack([
#                 self.base_transform(img) for img in batch
#             ]).to(self.device)

#             batch_features = self.model(batch_tensors).squeeze().cpu().numpy()
#             if len(batch) == 1:
#                 batch_features = batch_features.reshape(1, -1)

#             features_list.extend(batch_features)

#         return features_list

#     # ── Similarity Matching ────────────────────────────────────────────

#     @staticmethod
#     def cosine_similarity(feat1: np.ndarray, feat2: np.ndarray) -> float:
#         return np.dot(feat1, feat2) / (np.linalg.norm(feat1) * np.linalg.norm(feat2) + 1e-8)

#     def compute_multi_similarity(
#         self,
#         query_features: np.ndarray,
#         template_data: TemplateData,
#         method: str = 'mean'
#     ) -> float:

#         similarities = [
#             self.cosine_similarity(query_features, template_feat)
#             for template_feat in template_data.features_list
#         ]

#         if method == 'mean':
#             return np.mean(similarities)
#         elif method == 'max':
#             return np.max(similarities)
#         elif method == 'median':
#             return np.median(similarities)
#         else:
#             return np.mean(similarities)

#     # ── Template Management ────────────────────────────────────────────

#     def save_template(
#         self,
#         images: List[np.ndarray],
#         part_name: str,
#         use_augmentation: bool = True
#     ) -> str:

#         if not images or not part_name:
#             return "❌ Please provide both images and part name"

#         if not part_name.strip():
#             return "❌ Part name cannot be empty"

#         print(f"Processing {len(images)} images for '{part_name}'...")

#         # Extract features from all images
#         all_features = []

#         for idx, image in enumerate(images):
#             if use_augmentation:
#                 # Extract features with augmentation (multiple versions per image)
#                 aug_features = self.extract_features(image, use_augmentation=True)
#                 all_features.extend(aug_features)
#                 print(f"  Image {idx+1}: Extracted {len(aug_features)} augmented features")
#             else:
#                 # Single feature per image
#                 features = self.extract_features(image, use_augmentation=False)
#                 all_features.append(features)
#                 print(f"  Image {idx+1}: Extracted 1 feature")

#         # Create template
#         template_data = TemplateData(
#             features_list=all_features,
#             part_name=part_name,
#             num_images=len(images)
#         )

#         self.templates[part_name] = template_data

#         # Save to disk
#         try:
#             with open(self.templates_path, 'wb') as f:
#                 pickle.dump(self.templates, f)

#             total_features = len(all_features)
#             return (f"✅ Template '{part_name}' saved successfully!\n"
#                     f"📊 {len(images)} images → {total_features} feature vectors\n"
#                     f"💪 Strong reference built for robust matching")
#         except Exception as e:
#             return f"❌ Error saving template: {str(e)}"

#     def load_templates(self):
#         """Load templates from disk"""
#         if os.path.exists(self.templates_path):
#             try:
#                 with open(self.templates_path, 'rb') as f:
#                     self.templates = pickle.load(f)
#                 print(f"Loaded {len(self.templates)} templates")
#             except Exception as e:
#                 print(f"Error loading templates: {e}")
#                 self.templates = {}
#         else:
#             self.templates = {}

#     def delete_template(self, part_name: str) -> str:
#         """Delete a template"""
#         if part_name in self.templates:
#             del self.templates[part_name]
#             with open(self.templates_path, 'wb') as f:
#                 pickle.dump(self.templates, f)
#             return f"✅ Template '{part_name}' deleted"
#         return f"❌ Template '{part_name}' not found"

#     # ── Part Detection ─────────────────────────────────────────────────

#     def match_part(
#         self,
#         image: np.ndarray,
#         threshold: float = 0.7,
#         similarity_method: str = 'mean'
#     ) -> Tuple[str, Optional[str]]:

#         if image is None:
#             return "❌ Please provide an image", None

#         if not self.templates:
#             return "⚠️ No templates available. Please add templates first.", None

#         # Extract query features
#         query_features = self.extract_features(image, use_augmentation=False)

#         # Compute similarities for all templates
#         results = []
#         for part_name, template_data in self.templates.items():
#             similarity = self.compute_multi_similarity(
#                 query_features,
#                 template_data,
#                 method=similarity_method
#             )
#             results.append((part_name, similarity, template_data.num_images))

#         # Sort by similarity
#         results.sort(key=lambda x: x[1], reverse=True)

#         # Format output
#         best_match = results[0]
#         output_text = f"🎯 **Best Match**: {best_match[0]}\n"
#         output_text += f"📊 **Confidence**: {best_match[1]:.2%}\n"
#         output_text += f"📸 **Template Size**: {best_match[2]} reference images\n\n"

#         if best_match[1] >= threshold:
#             output_text += "✅ **Status**: MATCHED\n\n"
#             matched_label = best_match[0]
#         else:
#             output_text += "❌ **Status**: NO MATCH (below threshold)\n\n"
#             matched_label = None

#         output_text += "**All Results:**\n"
#         for part, sim, num_imgs in results:
#             output_text += f"- {part}: {sim:.2%} ({num_imgs} ref images)\n"

#         return output_text, matched_label


# # ── Global Detector Instance ───────────────────────────────────────────

# detector = EnginePartDetector()


# # ── Gradio Interface Functions ─────────────────────────────────────────

# def add_template(images, part_name, use_augmentation):
#     """Add template from multiple images"""
#     if images is None or len(images) == 0:
#         return "❌ Please upload at least one image"

#     # Convert images to list if single image
#     if not isinstance(images, list):
#         images = [images]

#     return detector.save_template(images, part_name.strip(), use_augmentation)


# def detect_part(image, threshold, method):
#     """Detect part in image"""
#     return detector.match_part(image, threshold, method)


# def list_templates():
#     """List all saved templates"""
#     if not detector.templates:
#         return "📋 No templates saved yet"

#     output = "📋 **Saved Templates:**\n\n"
#     for name, template_data in detector.templates.items():
#         total_features = len(template_data.features_list)
#         output += f"- **{name}**\n"
#         output += f"  └─ {template_data.num_images} images → {total_features} feature vectors\n\n"

#     return output


# def delete_template_ui(part_name):
#     """Delete a template"""
#     return detector.delete_template(part_name)


# # ── Gradio UI ──────────────────────────────────────────────────────────

# with gr.Blocks(title="Engine Part Detection System", theme=gr.themes.Soft()) as demo:
#     gr.Markdown("""
#     # 🔧 Engine Part Detection System
#     ### Multi-Image Template Matching with ResNet50

#     **Key Features:**
#     - 🖼️ **Multi-Image Templates**: Build robust references from multiple angles/lighting
#     - 💡 **Lighting Resistant**: CLAHE preprocessing + augmentation handles shadows
#     - 🎯 **High Accuracy**: Multiple feature vectors per template ensure reliable matching

#     **Workflow:**
#     1. **Add Templates**: Upload 3-10 images per part (different angles/lighting)
#     2. **Detect Parts**: Real-time matching with confidence scores
#     """)

#     with gr.Tab("🔍 Detect Part"):
#         gr.Markdown("### Upload or capture an image to identify the engine part")

#         with gr.Row():
#             with gr.Column():
#                 detect_input = gr.Image(
#                     sources=["upload", "webcam"],
#                     type="numpy",
#                     label="Input Image"
#                 )

#                 with gr.Row():
#                     threshold_slider = gr.Slider(
#                         0.5, 0.95,
#                         value=0.7,
#                         step=0.05,
#                         label="Similarity Threshold"
#                     )
#                     method_dropdown = gr.Dropdown(
#                         choices=['mean', 'max', 'median'],
#                         value='mean',
#                         label="Matching Method"
#                     )

#                 detect_btn = gr.Button("🔍 Detect Part", variant="primary", size="lg")

#             with gr.Column():
#                 detect_output = gr.Textbox(
#                     label="Detection Results",
#                     lines=12,
#                     max_lines=15
#                 )
#                 match_label = gr.Label(label="Matched Part", num_top_classes=3)

#         detect_btn.click(
#             fn=detect_part,
#             inputs=[detect_input, threshold_slider, method_dropdown],
#             outputs=[detect_output, match_label],
#             api_name="detect"
#         )

#     with gr.Tab("➕ Add Template"):
#         gr.Markdown("""
#         ### Build a Golden Template
#         Upload **3-10 images** of the same part with:
#         - Different lighting conditions (bright, dim, shadows)
#         - Multiple angles (front, side, top)
#         - Various backgrounds
#         """)

#         with gr.Row():
#             with gr.Column():
#                 template_input = gr.File(
#                     file_count="multiple",
#                     file_types=["image"],
#                     label="📸 Upload Multiple Images (3-10 recommended)"
#                 )

#                 part_name_input = gr.Textbox(
#                     label="Part Name",
#                     placeholder="e.g., spark_plug, piston, cylinder_head",
#                     info="Use descriptive names without spaces"
#                 )

#                 augmentation_checkbox = gr.Checkbox(
#                     value=True,
#                     label="🔄 Enable Augmentation (Recommended)",
#                     info="Automatically generates variations for robustness"
#                 )

#                 add_btn = gr.Button("💾 Save Template", variant="primary", size="lg")

#             with gr.Column():
#                 add_output = gr.Textbox(
#                     label="Status",
#                     lines=8,
#                     max_lines=10
#                 )

#                 gr.Markdown("""
#                 **Tips:**
#                 - More images = better accuracy
#                 - Vary lighting: normal, bright, shadowed
#                 - Include different angles
#                 - Augmentation adds 4x variations per image
#                 """)

#         # Handle file upload and conversion
#         def process_files(files):
#             if files is None:
#                 return []
#             images = []
#             for file in files:
#                 img = Image.open(file.name)
#                 images.append(np.array(img))
#             return images

#         template_images_state = gr.State([])

#         template_input.change(
#             fn=process_files,
#             inputs=template_input,
#             outputs=template_images_state
#         )

#         add_btn.click(
#             fn=add_template,
#             inputs=[template_images_state, part_name_input, augmentation_checkbox],
#             outputs=add_output,
#             api_name="add_template"
#         )

#     with gr.Tab("📋 Manage Templates"):
#         gr.Markdown("### View and manage saved templates")

#         with gr.Row():
#             with gr.Column():
#                 template_list = gr.Textbox(
#                     label="Saved Templates",
#                     lines=15,
#                     max_lines=20
#                 )
#                 refresh_btn = gr.Button("🔄 Refresh List", size="sm")

#             with gr.Column():
#                 delete_part_name = gr.Textbox(
#                     label="Template Name to Delete",
#                     placeholder="Enter exact part name"
#                 )
#                 delete_btn = gr.Button("🗑️ Delete Template", variant="stop")
#                 delete_output = gr.Textbox(label="Status", lines=3)

#         refresh_btn.click(
#             fn=list_templates,
#             outputs=template_list,
#             api_name="list_templates"
#         )

#         delete_btn.click(
#             fn=delete_template_ui,
#             inputs=delete_part_name,
#             outputs=delete_output
#         ).then(
#             fn=list_templates,
#             outputs=template_list
#         )

#         # Auto-load templates on tab open
#         demo.load(fn=list_templates, outputs=template_list)

#     with gr.Tab("ℹ️ Help"):
#         gr.Markdown("""
#         ## How It Works

#         ### 🎯 Multi-Image Template Matching
#         Each template consists of multiple feature vectors extracted from:
#         - Your uploaded reference images (3-10 recommended)
#         - Automatic augmentations (brightness, contrast variations)

#         This creates a **"golden template"** that's robust to:
#         - ✅ Lighting changes (shadows, bright spots)
#         - ✅ Different angles and perspectives
#         - ✅ Background variations
#         - ✅ Camera/phone variations

#         ### 🔬 Technical Details
#         - **Backbone**: ResNet50 pre-trained on ImageNet
#         - **Preprocessing**: CLAHE (contrast enhancement) + Gaussian blur
#         - **Matching**: Cosine similarity across multiple feature vectors
#         - **Method Options**:
#           - `mean`: Average similarity (balanced, recommended)
#           - `max`: Best single match (more lenient)
#           - `median`: Middle similarity (outlier-resistant)

#         ### 💡 Best Practices

#         1. **Template Creation**:
#            - Upload 5-7 images per part minimum
#            - Include normal, bright, and shadowed lighting
#            - Capture multiple angles
#            - Keep augmentation enabled

#         2. **Detection**:
#            - Start with 0.70 threshold
#            - Adjust based on results (lower = more lenient)
#            - Use `mean` method for general use

#         3. **Troubleshooting**:
#            - Low confidence? Add more template images
#            - False positives? Increase threshold
#            - Missing detections? Lower threshold or use `max` method

#         ### 📋 Example Workflow
#         ```
#         Template Building:
#         ├─ Part: "spark_plug"
#         ├─ Images: 5 photos
#         ├─ Augmentation: 4x per image
#         └─ Total: 20 feature vectors

#         Detection:
#         ├─ Query: New spark plug photo
#         ├─ Similarity: 87% (mean across 20 vectors)
#         └─ Result: ✅ MATCHED
#         ```
#         """)


# if __name__ == "__main__":
#     demo.launch(
#         server_name="0.0.0.0",
#         server_port=7860,
#         share=False
#     )
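A few standalone sketches of the deleted pipeline's core steps follow; they are illustrative reconstructions, not part of the commit. First, the CLAHE preprocessing that both versions apply to the LAB lightness channel before any decision is made. A minimal sketch on synthetic data, using the same clipLimit/tileGridSize defaults as MultiStageEngineDetector (the low-contrast patch is an assumption standing in for a dim saddle photo):

import cv2
import numpy as np

# Low-contrast grey patch (hypothetical stand-in for a dim saddle photo)
rng = np.random.default_rng(1)
patch = np.clip(rng.normal(120, 8, (64, 64)), 0, 255).astype(np.uint8)

# Same parameters as the deleted MultiStageEngineDetector defaults
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
enhanced = clahe.apply(patch)

# Local histogram equalization spreads the values, raising contrast
print(f"std before: {patch.std():.1f}, after: {enhanced.std():.1f}")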
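Next, the Layer-1 verdict reduces to a few LAB-space statistics. A minimal sketch of that decision rule, with the thresholds copied from the ColorThresholds defaults above; the uniform grey input is an assumption standing in for a real saddle crop:

import cv2
import numpy as np

img = np.full((64, 64, 3), 180, dtype=np.uint8)  # synthetic light-grey RGB patch

lab = cv2.cvtColor(cv2.cvtColor(img, cv2.COLOR_RGB2BGR), cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)

l_mean = float(l.mean())
# Saturation = distance from the neutral point (128, 128) in the a-b plane
saturation = float(np.hypot(a.mean() - 128.0, b.mean() - 128.0))

# Thresholds from ColorThresholds above
if l_mean < 105.0:
    verdict = "FAIL: dark metallic grey"
elif 110.0 <= l_mean <= 200.0 and saturation < 30.0:
    verdict = "PASS: light grey"
else:
    verdict = "PASS: ambiguous, low confidence"

print(f"L={l_mean:.1f}, sat={saturation:.1f} -> {verdict}")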
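Finally, the Layer-2 score is plain cosine similarity between 2048-dimensional pooled ResNet-50 features. A quick sanity check on random stand-in vectors (no model needed) shows why near-duplicates clear the 0.70 default threshold while unrelated vectors do not:

import numpy as np

def cosine_similarity(f1, f2):
    # Same formula as MultiStageEngineDetector.cosine_similarity above
    return float(np.dot(f1, f2) / (np.linalg.norm(f1) * np.linalg.norm(f2) + 1e-8))

rng = np.random.default_rng(0)
golden = rng.normal(size=2048)               # stand-in golden-template feature
near = golden + 0.1 * rng.normal(size=2048)  # slightly perturbed "same part"
other = rng.normal(size=2048)                # unrelated part

print(cosine_similarity(near, golden))   # ~0.995 -> would PASS at 0.70
print(cosine_similarity(other, golden))  # ~0.0   -> would FAIL at 0.70

Note that real pooled ResNet-50 features are non-negative (they come after a ReLU), so even unrelated images typically score well above zero; that is presumably why the threshold is exposed as a UI slider rather than hard-coded.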
app.py
ADDED
@@ -0,0 +1,753 @@
import os
import sys
import cv2
import time
import asyncio
import logging
import argparse
import threading
import numpy as np
from enum import Enum
from typing import Optional, Dict, Any, Callable, Tuple, List
from dataclasses import dataclass, field
from datetime import datetime
from PIL import Image

# ─────────────────────────────────────────────────────────────────
# Logging
# ─────────────────────────────────────────────────────────────────

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s │ %(name)-12s │ %(levelname)-7s │ %(message)s",
    datefmt="%H:%M:%S",
)
log = logging.getLogger("detection")

# ─────────────────────────────────────────────────────────────────
# Data Models
# ─────────────────────────────────────────────────────────────────

class Verdict(str, Enum):
    PASS = "PASS"
    FAIL = "FAIL"
    UNKNOWN = "UNKNOWN"
    ERROR = "ERROR"

@dataclass
class QualityMetrics:
    """Image quality measurements."""
    brightness: float = 0.0
    contrast: float = 0.0
    sharpness: float = 0.0
    is_blurred: bool = False
    resolution: Tuple[int, int] = (0, 0)

    @property
    def quality_score(self) -> float:
        # Map Laplacian-variance sharpness onto a 0-100 score, capped at 100.
        return min(100.0, self.sharpness / 2.0)

@dataclass
class SegmentedROI:
    """A detected region of interest from segmentation."""
    bbox: Tuple[int, int, int, int]  # x, y, w, h
    contour: Any = None
    cropped_image: Optional[Image.Image] = None
    mask: Optional[np.ndarray] = None
    area: float = 0.0
    circularity: float = 0.0
    label: str = "part"

+
@dataclass
|
| 67 |
+
class DetectionResult:
|
| 68 |
+
"""Complete result of a single detection pass."""
|
| 69 |
+
verdict: Verdict = Verdict.UNKNOWN
|
| 70 |
+
confidence: float = 0.0
|
| 71 |
+
matched_class: str = ""
|
| 72 |
+
quality: QualityMetrics = field(default_factory=QualityMetrics)
|
| 73 |
+
visualization_b64: Optional[str] = None
|
| 74 |
+
all_scores: Dict[str, float] = field(default_factory=dict)
|
| 75 |
+
segments_found: int = 0
|
| 76 |
+
status_detail: str = ""
|
| 77 |
+
timestamp: str = ""
|
| 78 |
+
elapsed_ms: float = 0.0
|
| 79 |
+
|
| 80 |
+
def to_dict(self) -> Dict[str, Any]:
|
| 81 |
+
return {
|
| 82 |
+
"verdict": self.verdict.value,
|
| 83 |
+
"confidence": round(self.confidence, 4),
|
| 84 |
+
"matched_class": self.matched_class,
|
| 85 |
+
"quality": {
|
| 86 |
+
"brightness": round(self.quality.brightness, 2),
|
| 87 |
+
"contrast": round(self.quality.contrast, 2),
|
| 88 |
+
"sharpness": round(self.quality.sharpness, 2),
|
| 89 |
+
"is_blurred": self.quality.is_blurred,
|
| 90 |
+
"quality_score": round(self.quality.quality_score, 2),
|
| 91 |
+
"resolution": list(self.quality.resolution),
|
| 92 |
+
},
|
| 93 |
+
"visualization": self.visualization_b64,
|
| 94 |
+
"all_scores": self.all_scores,
|
| 95 |
+
"segments_found": self.segments_found,
|
| 96 |
+
"status_detail": self.status_detail,
|
| 97 |
+
"timestamp": self.timestamp,
|
| 98 |
+
"elapsed_ms": round(self.elapsed_ms, 1),
|
| 99 |
+
}
|
| 100 |
+
|
| 101 |
+
|
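# Usage sketch (illustrative; the field values below are made up): a result
# serializes cleanly via to_dict(), e.g. as a JSON API response body.
# Defined only — nothing runs at import time.
def _example_result_json() -> str:
    import json
    demo = DetectionResult(verdict=Verdict.PASS, confidence=0.91, matched_class="Perfect")
    return json.dumps(demo.to_dict(), indent=2)
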
@dataclass
class SessionStats:
    """Running totals for an auto-inspection session."""
    total: int = 0
    passed: int = 0
    failed: int = 0
    unknown: int = 0
    errors: int = 0
    start_time: Optional[float] = None

    @property
    def elapsed_seconds(self) -> float:
        if self.start_time is None:
            return 0.0
        return time.time() - self.start_time

    def record(self, verdict: Verdict):
        self.total += 1
        if verdict == Verdict.PASS:
            self.passed += 1
        elif verdict == Verdict.FAIL:
            self.failed += 1
        elif verdict == Verdict.UNKNOWN:
            self.unknown += 1
        else:
            self.errors += 1

    def to_dict(self) -> Dict[str, Any]:
        return {
            "total": self.total,
            "passed": self.passed,
            "failed": self.failed,
            "unknown": self.unknown,
            "errors": self.errors,
            "elapsed_seconds": round(self.elapsed_seconds, 1),
        }

# ─────────────────────────────────────────────────────────────────
# Image Analyzer – Validation, Quality, and Segmentation
# ─────────────────────────────────────────────────────────────────

class ImageAnalyzer:
    """
    Handles all pre-AI image analysis:
    - Quality validation (brightness, contrast, sharpness)
    - Part segmentation via contour + morphological analysis
    - ROI extraction for focused detection
    """

    # Thresholds
    MIN_RESOLUTION = (320, 240)
    MAX_INPUT_DIM = 1024
    BRIGHTNESS_FLOOR = 15
    BRIGHTNESS_CEIL = 245
    CONTRAST_FLOOR = 5
    BLUR_THRESHOLD = 100.0  # Laplacian variance below this = blurry

    # Segmentation tunables
    MORPHO_KERNEL = 5
    MIN_CONTOUR_AREA_RATIO = 0.005  # Minimum area relative to image area
    MAX_CONTOUR_AREA_RATIO = 0.85   # Maximum area relative to image area
    CIRCULARITY_THRESHOLD = 0.15    # Minimum circularity for a valid part contour

+
def measure_quality(self, img: Image.Image) -> QualityMetrics:
|
| 167 |
+
"""Compute image quality metrics without modifying the image."""
|
| 168 |
+
arr = np.array(img.convert("RGB"))
|
| 169 |
+
gray = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
|
| 170 |
+
laplacian_var = float(cv2.Laplacian(gray, cv2.CV_64F).var())
|
| 171 |
+
|
| 172 |
+
return QualityMetrics(
|
| 173 |
+
brightness=float(np.mean(arr)),
|
| 174 |
+
contrast=float(np.std(arr)),
|
| 175 |
+
sharpness=laplacian_var,
|
| 176 |
+
is_blurred=laplacian_var < self.BLUR_THRESHOLD,
|
| 177 |
+
resolution=(img.width, img.height),
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
def validate(self, img: Image.Image) -> Tuple[bool, str]:
|
| 181 |
+
|
| 182 |
+
w, h = img.size
|
| 183 |
+
if w < self.MIN_RESOLUTION[0] or h < self.MIN_RESOLUTION[1]:
|
| 184 |
+
return False, f"Resolution too low: {w}Γ{h} (need {self.MIN_RESOLUTION[0]}Γ{self.MIN_RESOLUTION[1]})"
|
| 185 |
+
|
| 186 |
+
aspect = w / h
|
| 187 |
+
if aspect < 0.2 or aspect > 5.0:
|
| 188 |
+
return False, f"Unusual aspect ratio: {aspect:.2f}"
|
| 189 |
+
|
| 190 |
+
metrics = self.measure_quality(img)
|
| 191 |
+
if metrics.brightness < self.BRIGHTNESS_FLOOR:
|
| 192 |
+
return False, "Image too dark"
|
| 193 |
+
if metrics.brightness > self.BRIGHTNESS_CEIL:
|
| 194 |
+
return False, "Image too bright / overexposed"
|
| 195 |
+
if metrics.contrast < self.CONTRAST_FLOOR:
|
| 196 |
+
return False, "Insufficient contrast β blank or uniform image"
|
| 197 |
+
|
| 198 |
+
return True, "OK"
|
| 199 |
+
|
| 200 |
+
def prepare(self, img: Image.Image) -> Image.Image:
|
| 201 |
+
|
| 202 |
+
if img.mode != "RGB":
|
| 203 |
+
img = img.convert("RGB")
|
| 204 |
+
img.thumbnail((self.MAX_INPUT_DIM, self.MAX_INPUT_DIM), Image.Resampling.LANCZOS)
|
| 205 |
+
return img
|
| 206 |
+
|
    # ── Part Segmentation ────────────────────────────────────────

    def segment_parts(self, img: Image.Image) -> List[SegmentedROI]:
        """Find candidate part regions via adaptive thresholding, morphology, and contour filters."""
        arr = np.array(img.convert("RGB"))
        gray = cv2.cvtColor(arr, cv2.COLOR_RGB2GRAY)
        img_area = gray.shape[0] * gray.shape[1]

        # Adaptive threshold deals better with shadows than global Otsu
        binary = cv2.adaptiveThreshold(
            gray, 255,
            cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
            cv2.THRESH_BINARY_INV,
            blockSize=31,
            C=10,
        )

        # Morphological closing fills holes inside parts
        kernel = cv2.getStructuringElement(
            cv2.MORPH_ELLIPSE,
            (self.MORPHO_KERNEL, self.MORPHO_KERNEL),
        )
        closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel, iterations=3)

        # Optional: small opening to remove noise specks
        opened = cv2.morphologyEx(closed, cv2.MORPH_OPEN, kernel, iterations=1)

        contours, _ = cv2.findContours(opened, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        rois: List[SegmentedROI] = []
        for cnt in contours:
            area = cv2.contourArea(cnt)
            ratio = area / img_area

            # Filter by relative area
            if ratio < self.MIN_CONTOUR_AREA_RATIO or ratio > self.MAX_CONTOUR_AREA_RATIO:
                continue

            # Circularity = 4π × area / perimeter² (1.0 for a perfect circle)
            perimeter = cv2.arcLength(cnt, True)
            circularity = (4 * np.pi * area / (perimeter ** 2)) if perimeter > 0 else 0

            if circularity < self.CIRCULARITY_THRESHOLD:
                continue

            x, y, w, h = cv2.boundingRect(cnt)

            # Create a mask for this contour and crop
            mask = np.zeros(gray.shape, dtype=np.uint8)
            cv2.drawContours(mask, [cnt], -1, 255, thickness=cv2.FILLED)

            # Crop the bounding box region
            crop_arr = arr[y:y + h, x:x + w].copy()
            crop_mask = mask[y:y + h, x:x + w]
            # Apply mask → set background to black
            crop_arr[crop_mask == 0] = 0
            cropped_pil = Image.fromarray(crop_arr)

            rois.append(SegmentedROI(
                bbox=(x, y, w, h),
                contour=cnt,
                cropped_image=cropped_pil,
                mask=crop_mask,
                area=area,
                circularity=circularity,
                label=f"part_{len(rois)}",
            ))

        # Sort by area descending → largest part first
        rois.sort(key=lambda r: r.area, reverse=True)
        log.info(f"Segmentation: found {len(rois)} part region(s) from {len(contours)} contours")
        return rois

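    # Worked example (illustrative numbers, not pipeline output): under
    # circularity = 4π·area/perimeter², a circle of radius r scores
    # 4π·πr²/(2πr)² = 1.0 and a unit square scores 4π·1/4² = π/4 ≈ 0.785,
    # so the 0.15 floor above only rejects very elongated or ragged contours.
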
    def draw_segmentation_overlay(
        self, img: Image.Image, rois: List[SegmentedROI], verdict: Optional[Verdict] = None
    ) -> Image.Image:
        """Draw ROI boxes, labels, and an optional verdict stamp on a copy of the image."""
        arr = np.array(img.convert("RGB")).copy()

        color_map = {
            Verdict.PASS: (0, 200, 100),
            Verdict.FAIL: (220, 60, 60),
            Verdict.UNKNOWN: (220, 180, 0),
            Verdict.ERROR: (128, 128, 128),
            None: (100, 180, 255),
        }
        color = color_map.get(verdict, (100, 180, 255))

        for roi in rois:
            x, y, w, h = roi.bbox
            cv2.rectangle(arr, (x, y), (x + w, y + h), color, 2)

            # Label with circularity info
            label = f"{roi.label} ({roi.circularity:.2f})"
            font_scale = max(0.4, min(1.0, w / 300))
            cv2.putText(arr, label, (x, max(y - 8, 15)),
                        cv2.FONT_HERSHEY_SIMPLEX, font_scale, color, 1, cv2.LINE_AA)

        # Verdict stamp in top-right
        if verdict is not None:
            stamp = verdict.value
            (tw, th), _ = cv2.getTextSize(stamp, cv2.FONT_HERSHEY_SIMPLEX, 1.2, 3)
            sx = arr.shape[1] - tw - 20
            sy = th + 20
            cv2.rectangle(arr, (sx - 10, sy - th - 10), (sx + tw + 10, sy + 10), color, cv2.FILLED)
            cv2.putText(arr, stamp, (sx, sy),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 255, 255), 3, cv2.LINE_AA)

        return Image.fromarray(arr)

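# Usage sketch (illustrative; "sample.jpg" is a placeholder path): the quality
# gate and segmentation can be exercised standalone, without the AI backend.
# Defined only — nothing runs at import time.
def _example_analyze_file(path: str = "sample.jpg") -> None:
    analyzer = ImageAnalyzer()
    img = Image.open(path)
    ok, reason = analyzer.validate(img)
    metrics = analyzer.measure_quality(img)
    print(f"valid={ok} ({reason}), sharpness={metrics.sharpness:.1f}")
    if ok:
        rois = analyzer.segment_parts(analyzer.prepare(img))
        print(f"{len(rois)} candidate region(s)")
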
# ─────────────────────────────────────────────────────────────────
# Detection Engine – Orchestrates the full pipeline
# ─────────────────────────────────────────────────────────────────

class DetectionEngine:
    """Runs quality gate → segmentation → AI classification → verdict."""

    def __init__(self, hf_client=None, threshold: float = 0.70):
        self.analyzer = ImageAnalyzer()
        self.threshold = threshold

        # Lazy-init the HF client so index.py stays importable
        # without triggering network calls at import time.
        self._hf_client = hf_client
        self._hf_initialized = hf_client is not None

    @property
    def hf(self):
        if not self._hf_initialized:
            from hf_client import HuggingFaceClient
            self._hf_client = HuggingFaceClient()
            self._hf_initialized = True
        return self._hf_client

    async def run(self, img: Image.Image, threshold: Optional[float] = None) -> DetectionResult:
        """Run the full detection pipeline on one image and return the result."""
        t0 = time.time()
        result = DetectionResult(timestamp=datetime.utcnow().isoformat())
        thr = threshold if threshold is not None else self.threshold

        # ── Step 1: Quality Gate ─────────────────────────────────
        valid, reason = self.analyzer.validate(img)
        result.quality = self.analyzer.measure_quality(img)

        if not valid:
            result.verdict = Verdict.ERROR
            result.status_detail = f"Quality rejected: {reason}"
            result.elapsed_ms = (time.time() - t0) * 1000
            log.warning(f"Quality gate failed: {reason}")
            return result

        # ── Step 2: Segment Parts ────────────────────────────────
        rois = self.analyzer.segment_parts(img)
        result.segments_found = len(rois)

        if len(rois) == 0:
            log.info("No part segments found → sending full image to AI")

        # ── Step 3: Prepare for AI ───────────────────────────────
        # Send the full image (the backend has its own ROI logic).
        # The segmentation here is for local overlay + future use.
        prepared = self.analyzer.prepare(img)

        # ── Step 4: AI Classification ────────────────────────────
        try:
            ai_result = await self.hf.detect_part(prepared, thr)
        except Exception as exc:
            result.verdict = Verdict.ERROR
            result.status_detail = f"AI backend error: {exc}"
            result.elapsed_ms = (time.time() - t0) * 1000
            log.error(f"AI call failed: {exc}")
            return result

        if not ai_result.get("success"):
            result.verdict = Verdict.ERROR
            result.status_detail = f"AI returned failure: {ai_result.get('error', 'unknown')}"
            result.elapsed_ms = (time.time() - t0) * 1000
            return result

        # ── Step 5: Interpret Verdict ────────────────────────────
        best_match = str(ai_result.get("best_match", "")).strip()
        confidence = float(ai_result.get("confidence", 0.0))
        all_scores = ai_result.get("all_scores", {})
        status_text = str(ai_result.get("status_text", ""))

        result.confidence = confidence
        result.matched_class = best_match
        result.all_scores = all_scores
        result.status_detail = status_text

        result.verdict = self._interpret_verdict(best_match, status_text)

        # ── Step 6: Visualization ────────────────────────────────
        # Build a composite overlay: segmentation boxes + verdict stamp
        vis_img = self.analyzer.draw_segmentation_overlay(prepared, rois, result.verdict)

        # Convert overlay to base64 for transport
        import io, base64
        buf = io.BytesIO()
        vis_img.save(buf, format="JPEG", quality=85)
        result.visualization_b64 = base64.b64encode(buf.getvalue()).decode("utf-8")

        # Also attach the AI backend's visualization if available
        vis_path = ai_result.get("visualization")
        if vis_path and os.path.exists(vis_path):
            try:
                with open(vis_path, "rb") as f:
                    result.visualization_b64 = base64.b64encode(f.read()).decode("utf-8")
            finally:
                try:
                    os.remove(vis_path)
                except OSError:
                    pass

        # Cleanup any temp files from the HF client
        for tmp in ai_result.get("_temp_paths", []):
            if tmp and tmp != vis_path:
                try:
                    os.remove(tmp)
                except OSError:
                    pass

        result.elapsed_ms = (time.time() - t0) * 1000
        log.info(
            f"Detection: {result.verdict.value} │ "
            f"class={best_match} │ conf={confidence:.3f} │ "
            f"segments={len(rois)} │ {result.elapsed_ms:.0f}ms"
        )
        return result

    @staticmethod
    def _interpret_verdict(best_match: str, status_text: str) -> Verdict:
        """Map the backend's class name and status text onto a Verdict."""
        match_upper = best_match.upper()
        status_lower = status_text.lower()

        # Localization failures (bolt holes not found, etc.)
        failure_markers = ["no bolt holes", "localization failed", "insufficient hole"]
        if any(marker in status_lower for marker in failure_markers):
            return Verdict.UNKNOWN

        # Empty / none match
        if not match_upper or match_upper == "NONE" or match_upper == "UNKNOWN":
            return Verdict.UNKNOWN

        # Explicit verdict from backend status text
        status_upper = status_text.upper()
        if "PASS" in status_upper:
            return Verdict.PASS
        if "FAIL" in status_upper:
            return Verdict.FAIL

        # Fallback: class-name heuristic
        if "PERFECT" in match_upper:
            return Verdict.PASS

        # Everything else (Defect, Damaged, etc.) is a FAIL
        return Verdict.FAIL

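# Illustrative mapping of _interpret_verdict (the class and status strings
# below are assumed examples, not confirmed backend outputs):
#   ("Perfect_Saddle", "")                     → PASS    (class-name heuristic)
#   ("Dark_Grey_Defect", "")                   → FAIL    (non-PERFECT fallback)
#   ("Perfect_Saddle", "Result: FAIL (tilt)")  → FAIL    (status text wins)
#   ("", "no bolt holes detected")             → UNKNOWN (localization failure)
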
# ─────────────────────────────────────────────────────────────────
# Camera Source – Manages OpenCV camera lifecycle
# ─────────────────────────────────────────────────────────────────

class CameraSource:
    """Thin wrapper around cv2.VideoCapture with warm-up frame handling."""

    WARMUP_FRAMES = 5  # Discard first N frames (often garbled)

    def __init__(self, camera_id: int = 0):
        self.camera_id = camera_id
        self._cap: Optional[cv2.VideoCapture] = None

    @staticmethod
    def detect_available(max_check: int = 5) -> List[int]:
        """Probe for available camera indices."""
        available = []
        for idx in range(max_check):
            cap = cv2.VideoCapture(idx, cv2.CAP_DSHOW if os.name == "nt" else cv2.CAP_ANY)
            if cap.isOpened():
                ret, _ = cap.read()
                if ret:
                    available.append(idx)
            cap.release()
        return available

    def open(self) -> bool:
        """Open the camera and discard warm-up frames."""
        backend = cv2.CAP_DSHOW if os.name == "nt" else cv2.CAP_ANY
        self._cap = cv2.VideoCapture(self.camera_id, backend)

        if not self._cap.isOpened():
            log.error(f"Cannot open camera {self.camera_id}")
            return False

        # Set resolution hints
        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

        # Discard warm-up frames
        for _ in range(self.WARMUP_FRAMES):
            self._cap.read()

        log.info(f"Camera {self.camera_id} opened – "
                 f"{int(self._cap.get(cv2.CAP_PROP_FRAME_WIDTH))}×"
                 f"{int(self._cap.get(cv2.CAP_PROP_FRAME_HEIGHT))}")
        return True

    def grab(self) -> Optional[Image.Image]:
        """Capture a single frame as a PIL Image (RGB)."""
        if self._cap is None or not self._cap.isOpened():
            return None
        ret, frame = self._cap.read()
        if not ret or frame is None:
            return None
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return Image.fromarray(rgb)

    def release(self):
        """Release the camera resource."""
        if self._cap is not None:
            self._cap.release()
            self._cap = None
            log.info(f"Camera {self.camera_id} released")

    @property
    def is_open(self) -> bool:
        return self._cap is not None and self._cap.isOpened()

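# Usage sketch (illustrative; "snapshot.jpg" is a placeholder path): grab one
# frame from the first detected camera and save it to disk. Defined only —
# nothing runs at import time.
def _example_snapshot(path: str = "snapshot.jpg") -> bool:
    cams = CameraSource.detect_available()
    if not cams:
        return False
    cam = CameraSource(cams[0])
    try:
        if not cam.open():
            return False
        frame = cam.grab()
        if frame is None:
            return False
        frame.save(path)
        return True
    finally:
        cam.release()
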
# ─────────────────────────────────────────────────────────────────
# Auto Inspector – Continuous detection loop
# ─────────────────────────────────────────────────────────────────

class AutoInspector:
    """Continuously grabs camera frames and runs detection on a background thread."""

    def __init__(
        self,
        engine: DetectionEngine,
        camera_id: int = 0,
        interval: float = 3.0,
    ):
        self.engine = engine
        self.camera = CameraSource(camera_id)
        self.interval = max(1.0, interval)  # Floor at 1 second

        self._stop_event = threading.Event()
        self._thread: Optional[threading.Thread] = None
        self._loop: Optional[asyncio.AbstractEventLoop] = None
        self.stats = SessionStats()

    @property
    def is_running(self) -> bool:
        return self._thread is not None and self._thread.is_alive()

    def start(self, on_result: Optional[Callable] = None):
        """Launch the background inspection thread (no-op if already running)."""
        if self.is_running:
            log.warning("Auto-inspection is already running")
            return

        self._stop_event.clear()
        self.stats = SessionStats(start_time=time.time())

        self._thread = threading.Thread(
            target=self._run_loop,
            args=(on_result,),
            daemon=True,
            name="auto-inspector",
        )
        self._thread.start()
        log.info(f"Auto-inspection started – camera={self.camera.camera_id}, interval={self.interval}s")

    def stop(self):
        """Signal the loop to stop and wait for cleanup."""
        if not self.is_running:
            return
        log.info("Stopping auto-inspection...")
        self._stop_event.set()
        if self._thread:
            self._thread.join(timeout=10)
        self.camera.release()
        log.info(f"Auto-inspection stopped – {self.stats.to_dict()}")

    def _run_loop(self, on_result: Optional[Callable]):
        """Internal loop that runs in a background thread."""
        # Create a new event loop for this thread
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self._loop = loop

        try:
            if not self.camera.open():
                log.error("Failed to open camera – aborting auto-inspection")
                return

            while not self._stop_event.is_set():
                frame = self.camera.grab()
                if frame is None:
                    log.warning("Frame grab failed – retrying in 1s")
                    self._stop_event.wait(1.0)
                    continue

                # Run detection synchronously within this thread's event loop
                result = loop.run_until_complete(self.engine.run(frame))
                self.stats.record(result.verdict)

                if on_result:
                    try:
                        on_result(result, self.stats)
                    except Exception as cb_err:
                        log.error(f"Callback error: {cb_err}")

                # Wait for the interval (interruptible)
                self._stop_event.wait(self.interval)

        except Exception as exc:
            log.error(f"Auto-inspection loop crashed: {exc}", exc_info=True)
        finally:
            self.camera.release()
            loop.close()

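# Usage sketch (illustrative; assumes a camera at index 0): run a bounded
# session for ~30 seconds, then return the tallies. _print_result is defined
# further down in this file; Python resolves it at call time, so the forward
# reference is safe. Defined only — nothing runs at import time.
def _example_session(seconds: float = 30.0) -> Dict[str, Any]:
    inspector = AutoInspector(DetectionEngine(), camera_id=0, interval=2.0)
    inspector.start(on_result=_print_result)
    time.sleep(seconds)
    inspector.stop()
    return inspector.stats.to_dict()
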
# ─────────────────────────────────────────────────────────────────
# Single-Image Detection (convenience function)
# ─────────────────────────────────────────────────────────────────

async def detect_image(
    image_path: str,
    threshold: float = 0.70,
    engine: Optional[DetectionEngine] = None,
) -> DetectionResult:
    """Run one detection pass on an image file."""
    if not os.path.isfile(image_path):
        raise FileNotFoundError(f"Image not found: {image_path}")

    img = Image.open(image_path)
    eng = engine or DetectionEngine(threshold=threshold)
    return await eng.run(img, threshold)

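# Usage sketch (illustrative; "part.jpg" is a placeholder path): synchronous
# callers can wrap the coroutine with asyncio.run. Defined only — nothing
# runs at import time.
def _example_detect_file(path: str = "part.jpg") -> DetectionResult:
    return asyncio.run(detect_image(path, threshold=0.75))
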
# ─────────────────────────────────────────────────────────────────
# CLI – Run as standalone script
# ─────────────────────────────────────────────────────────────────

def _print_result(result: DetectionResult, stats: Optional[SessionStats] = None):
    """Pretty-print one detection result (and optional session stats) to the console."""
    v = result.verdict.value
    color = {"PASS": "\033[92m", "FAIL": "\033[91m", "UNKNOWN": "\033[93m", "ERROR": "\033[90m"}
    reset = "\033[0m"
    c = color.get(v, "")

    print(f"\n{'─' * 50}")
    print(f" {c}● {v}{reset} │ class: {result.matched_class or '–'} │ conf: {result.confidence:.1%}")
    print(f" segments: {result.segments_found} │ quality: {result.quality.quality_score:.0f} │ {result.elapsed_ms:.0f}ms")

    if result.all_scores:
        scores_str = " ".join(f"{k}: {v:.1%}" for k, v in result.all_scores.items())
        print(f" scores: {scores_str}")

    if result.status_detail:
        detail = result.status_detail[:120].replace("\n", " ")
        print(f" detail: {detail}")

    if stats:
        s = stats
        print(f" session: {s.total} total │ ✓{s.passed} ✗{s.failed} ?{s.unknown} │ {s.elapsed_seconds:.0f}s")
    print(f"{'─' * 50}")

def main():
    parser = argparse.ArgumentParser(
        description="Engine Part Detection – Standalone Detection Pipeline",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python index.py                           # Auto-detect camera, run continuous
  python index.py --camera 0 --interval 5   # Camera 0, every 5 seconds
  python index.py --image part.jpg          # Single image detection
  python index.py --list-cameras            # Show available cameras
""",
    )

    grp = parser.add_mutually_exclusive_group()
    grp.add_argument("--image", "-i", type=str, help="Path to a single image file for detection")
    grp.add_argument("--camera", "-c", type=int, default=None, help="Camera index (default: auto-detect)")
    grp.add_argument("--list-cameras", action="store_true", help="List available cameras and exit")

    parser.add_argument("--threshold", "-t", type=float, default=0.70, help="Detection threshold (default: 0.70)")
    parser.add_argument("--interval", type=float, default=3.0, help="Seconds between captures in auto mode (default: 3.0)")
    parser.add_argument("--quiet", "-q", action="store_true", help="Suppress verbose output")

    args = parser.parse_args()

    if args.quiet:
        logging.getLogger("detection").setLevel(logging.WARNING)

    # ── List cameras ─────────────────────────────────────────────
    if args.list_cameras:
        print("Scanning for cameras...")
        cams = CameraSource.detect_available()
        if cams:
            print(f"Found {len(cams)} camera(s): {cams}")
        else:
            print("No cameras detected.")
        sys.exit(0)

    # ── Single image mode ────────────────────────────────────────
    if args.image:
        print(f"Analyzing: {args.image}")
        result = asyncio.run(detect_image(args.image, args.threshold))
        _print_result(result)
        sys.exit(0 if result.verdict != Verdict.ERROR else 1)

    # ── Auto inspection mode (camera) ────────────────────────────
    camera_id = args.camera
    if camera_id is None:
        print("Auto-detecting cameras...")
        available = CameraSource.detect_available()
        if not available:
            print("No cameras found. Use --image for file-based detection.")
            sys.exit(1)
        camera_id = available[0]
        print(f"Using camera {camera_id}")

    engine = DetectionEngine(threshold=args.threshold)
    inspector = AutoInspector(engine, camera_id=camera_id, interval=args.interval)

    print("\n Auto Inspection Mode")
    print(f" Camera: {camera_id} │ Interval: {args.interval}s │ Threshold: {args.threshold}")
    print(" Press Ctrl+C to stop\n")

    inspector.start(on_result=_print_result)

    try:
        while inspector.is_running:
            time.sleep(0.5)
    except KeyboardInterrupt:
        print("\n\nStopping...")
    finally:
        inspector.stop()
        print(f"\nSession summary: {inspector.stats.to_dict()}")


if __name__ == "__main__":
    main()