Update ocr_engine.py
ocr_engine.py CHANGED (+70 -56)
@@ -32,25 +32,32 @@ def estimate_brightness(img):
     return np.mean(gray)
 
 def preprocess_image(img):
-    """Preprocess image with
+    """Preprocess image with dynamic contrast and noise handling."""
     gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
     brightness = estimate_brightness(img)
-
-
-
+
+    # Dynamic CLAHE based on brightness
+    clahe_clip = 10.0 if brightness < 100 else 6.0
+    clahe = cv2.createCLAHE(clipLimit=clahe_clip, tileGridSize=(8, 8))
     enhanced = clahe.apply(gray)
     save_debug_image(enhanced, "01_preprocess_clahe")
-
-
+
+    # Edge-preserving blur with adaptive parameters
+    blur_diameter = 9 if brightness < 100 else 7
+    blurred = cv2.bilateralFilter(enhanced, blur_diameter, 75, 75)
     save_debug_image(blurred, "02_preprocess_blur")
-
-
-
-
-
+
+    # Dynamic adaptive thresholding
+    block_size = max(5, min(21, int(img.shape[0] / 30) * 2 + 1))
+    thresh = cv2.adaptiveThreshold(
+        blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+        cv2.THRESH_BINARY_INV, block_size, 5
+    )
+
+    # Morphological operations for better digit segmentation
     kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
-    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=
-    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=
+    thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
+    thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=4)
     save_debug_image(thresh, "03_preprocess_morph")
     return thresh, enhanced
 
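A note on the new thresholding: cv2.adaptiveThreshold requires an odd block size greater than 1, and the formula above always yields an odd value clamped to [5, 21]. A minimal sketch of the same brightness-adaptive rules on a synthetic frame (the test image and prints are illustrative only):

    import cv2
    import numpy as np

    img = np.full((240, 320, 3), 60, dtype=np.uint8)  # synthetic dark frame
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    brightness = float(np.mean(gray))

    clahe_clip = 10.0 if brightness < 100 else 6.0    # same rule as the diff
    clahe = cv2.createCLAHE(clipLimit=clahe_clip, tileGridSize=(8, 8))
    enhanced = clahe.apply(gray)

    block_size = max(5, min(21, int(img.shape[0] / 30) * 2 + 1))  # odd, in [5, 21]
    thresh = cv2.adaptiveThreshold(enhanced, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY_INV, block_size, 5)
    print(brightness, block_size, thresh.shape)  # 60.0 17 (240, 320)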
@@ -58,12 +65,12 @@ def correct_rotation(img):
     """Correct image rotation using edge detection."""
     try:
         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-        edges = cv2.Canny(gray,
-        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=
+        edges = cv2.Canny(gray, 50, 150, apertureSize=3)
+        lines = cv2.HoughLinesP(edges, 1, np.pi / 180, threshold=30, minLineLength=20, maxLineGap=5)
         if lines is not None:
             angles = [np.arctan2(line[0][3] - line[0][1], line[0][2] - line[0][0]) * 180 / np.pi for line in lines]
             angle = np.median(angles)
-            if abs(angle) > 0.
+            if abs(angle) > 0.5:
                 h, w = img.shape[:2]
                 center = (w // 2, h // 2)
                 M = cv2.getRotationMatrix2D(center, angle, 1.0)
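The hunk ends at the rotation matrix; the unchanged lines that apply it are not shown. For orientation, a sketch of how such a matrix is typically applied (the warpAffine flags and border mode here are assumptions, not part of the commit):

    import cv2

    def rotate_about_center(img, angle_deg):
        # Same matrix construction as correct_rotation() above.
        h, w = img.shape[:2]
        M = cv2.getRotationMatrix2D((w // 2, h // 2), angle_deg, 1.0)
        # Assumed completion: the actual call in the unchanged code may differ.
        return cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_LINEAR,
                              borderMode=cv2.BORDER_REPLICATE)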
@@ -76,20 +83,22 @@ def correct_rotation(img):
     return img
 
 def detect_roi(img):
-    """Detect region of interest with
+    """Detect region of interest with multi-scale contour analysis."""
     try:
         save_debug_image(img, "04_original")
         thresh, enhanced = preprocess_image(img)
         brightness_map = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-        block_sizes = [max(
+        block_sizes = [max(5, min(21, int(img.shape[0] / s) * 2 + 1)) for s in [5, 10, 15]]
         valid_contours = []
         img_area = img.shape[0] * img.shape[1]
 
         for block_size in block_sizes:
-            temp_thresh = cv2.adaptiveThreshold(
-
+            temp_thresh = cv2.adaptiveThreshold(
+                enhanced, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                cv2.THRESH_BINARY_INV, block_size, 5
+            )
             kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
-            temp_thresh = cv2.morphologyEx(temp_thresh, cv2.MORPH_CLOSE, kernel, iterations=
+            temp_thresh = cv2.morphologyEx(temp_thresh, cv2.MORPH_CLOSE, kernel, iterations=4)
             save_debug_image(temp_thresh, f"05_roi_threshold_block{block_size}")
             contours, _ = cv2.findContours(temp_thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
 
@@ -98,15 +107,15 @@ def detect_roi(img):
             x, y, w, h = cv2.boundingRect(c)
             roi_brightness = np.mean(brightness_map[y:y+h, x:x+w])
             aspect_ratio = w / h
-            if (
-                0.
+            if (100 < area < (img_area * 0.9) and
+                0.1 <= aspect_ratio <= 15.0 and w > 30 and h > 10 and roi_brightness > 20):
                 valid_contours.append((c, area * roi_brightness))
                 logging.debug(f"Contour (block {block_size}): Area={area}, Aspect={aspect_ratio:.2f}, Brightness={roi_brightness:.2f}")
 
         if valid_contours:
             contour, _ = max(valid_contours, key=lambda x: x[1])
             x, y, w, h = cv2.boundingRect(contour)
-            padding = max(
+            padding = max(8, min(25, int(min(w, h) * 0.3)))
             x, y = max(0, x - padding), max(0, y - padding)
             w, h = min(w + 2 * padding, img.shape[1] - x), min(h + 2 * padding, img.shape[0] - y)
             roi_img = img[y:y+h, x:x+w]
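Candidates are ranked by contour area multiplied by mean brightness, so a compact but bright LCD region can outscore a larger dark one. A toy check of the ranking rule (the numbers are made up):

    candidates = [("dark_large", 5000, 25.0), ("bright_lcd", 2200, 140.0)]  # (name, area, brightness)
    best = max(((name, area * b) for name, area, b in candidates), key=lambda s: s[1])
    print(best)  # ('bright_lcd', 308000.0): brightness outweighs raw area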
@@ -123,83 +132,88 @@ def detect_roi(img):
         return img, None
 
 def detect_digit_template(digit_img, brightness):
-    """Digit recognition using template matching with
+    """Digit recognition using template matching with refined patterns."""
     try:
         h, w = digit_img.shape
-        if h <
+        if h < 6 or w < 3:
             logging.debug("Digit image too small for template matching.")
             return None
 
-        #
+        # Refined digit templates for seven-segment display
         digit_templates = {
             '0': np.array([[1, 1, 1, 1, 1],
                            [1, 0, 0, 0, 1],
                            [1, 0, 0, 0, 1],
                            [1, 0, 0, 0, 1],
-                           [1, 1, 1, 1, 1]]),
+                           [1, 1, 1, 1, 1]], dtype=np.float32),
             '1': np.array([[0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0],
                            [0, 0, 1, 0, 0],
-                           [0, 0, 1, 0, 0]]),
+                           [0, 0, 1, 0, 0]], dtype=np.float32),
             '2': np.array([[1, 1, 1, 1, 1],
                            [0, 0, 0, 1, 1],
                            [1, 1, 1, 1, 1],
                            [1, 1, 0, 0, 0],
-                           [1, 1, 1, 1, 1]]),
+                           [1, 1, 1, 1, 1]], dtype=np.float32),
             '3': np.array([[1, 1, 1, 1, 1],
                            [0, 0, 0, 1, 1],
-                           [
+                           [1, 1, 1, 1, 1],
                            [0, 0, 0, 1, 1],
-                           [1, 1, 1, 1, 1]]),
+                           [1, 1, 1, 1, 1]], dtype=np.float32),
             '4': np.array([[1, 1, 0, 0, 1],
                            [1, 1, 0, 0, 1],
                            [1, 1, 1, 1, 1],
                            [0, 0, 0, 0, 1],
-                           [0, 0, 0, 0, 1]]),
+                           [0, 0, 0, 0, 1]], dtype=np.float32),
             '5': np.array([[1, 1, 1, 1, 1],
                            [1, 1, 0, 0, 0],
                            [1, 1, 1, 1, 1],
                            [0, 0, 0, 1, 1],
-                           [1, 1, 1, 1, 1]]),
+                           [1, 1, 1, 1, 1]], dtype=np.float32),
             '6': np.array([[1, 1, 1, 1, 1],
                            [1, 1, 0, 0, 0],
                            [1, 1, 1, 1, 1],
                            [1, 0, 0, 1, 1],
-                           [1, 1, 1, 1, 1]]),
+                           [1, 1, 1, 1, 1]], dtype=np.float32),
             '7': np.array([[1, 1, 1, 1, 1],
                            [0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 1],
                            [0, 0, 0, 0, 1],
-                           [0, 0, 0, 0, 1]]),
+                           [0, 0, 0, 0, 1]], dtype=np.float32),
             '8': np.array([[1, 1, 1, 1, 1],
                            [1, 0, 0, 0, 1],
                            [1, 1, 1, 1, 1],
                            [1, 0, 0, 0, 1],
-                           [1, 1, 1, 1, 1]]),
+                           [1, 1, 1, 1, 1]], dtype=np.float32),
             '9': np.array([[1, 1, 1, 1, 1],
                            [1, 0, 0, 0, 1],
                            [1, 1, 1, 1, 1],
                            [0, 0, 0, 1, 1],
-                           [1, 1, 1, 1, 1]]),
+                           [1, 1, 1, 1, 1]], dtype=np.float32),
             '.': np.array([[0, 0, 0],
                            [0, 1, 0],
-                           [0, 0, 0]])
+                           [0, 0, 0]], dtype=np.float32)
         }
 
-        # Resize
-
+        # Resize digit image to match template size
+        target_size = (5, 5) if h > w else (3, 3)  # Adjust for decimal point
+        digit_img_resized = cv2.resize(digit_img, target_size, interpolation=cv2.INTER_AREA)
+        digit_img_resized = (digit_img_resized > 128).astype(np.float32)  # Binarize
+
         best_match, best_score = None, -1
         for digit, template in digit_templates.items():
-            if digit == '.':
-
+            if digit == '.' and target_size != (3, 3):
+                continue
+            if digit != '.' and target_size == (3, 3):
+                continue
             result = cv2.matchTemplate(digit_img_resized, template, cv2.TM_CCOEFF_NORMED)
             _, max_val, _, _ = cv2.minMaxLoc(result)
-            if max_val > 0.
+            if max_val > 0.6 and max_val > best_score:  # Lowered threshold
                 best_score = max_val
                 best_match = digit
         logging.debug(f"Template match: {best_match}, Score: {best_score:.2f}")
-        return best_match if best_score > 0.
+        return best_match if best_score > 0.6 else None
     except Exception as e:
         logging.error(f"Template digit detection failed: {str(e)}")
         return None
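Because the crop is resized to exactly the template's dimensions, cv2.matchTemplate returns a 1x1 score map, so max_val is the single TM_CCOEFF_NORMED correlation. A self-contained check against the '1' template (the synthetic crop is illustrative only):

    import cv2
    import numpy as np

    template_1 = np.array([[0, 0, 1, 0, 0]] * 5, dtype=np.float32)
    crop = (np.array([[0, 0, 1, 0, 0]] * 5) * 255).astype(np.uint8)  # fake digit crop

    probe = (cv2.resize(crop, (5, 5), interpolation=cv2.INTER_AREA) > 128).astype(np.float32)
    result = cv2.matchTemplate(probe, template_1, cv2.TM_CCOEFF_NORMED)  # 1x1 map
    _, max_val, _, _ = cv2.minMaxLoc(result)
    print(max_val)  # ~1.0 for a perfect match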
@@ -212,8 +226,8 @@ def perform_ocr(img, roi_bbox):
         pil_img = Image.fromarray(enhanced)
         save_debug_image(pil_img, "07_ocr_input")
 
-        # Tesseract with
-        custom_config = r'--oem 3 --psm
+        # Tesseract with optimized numeric config
+        custom_config = r'--oem 3 --psm 7 -c tessedit_char_whitelist=0123456789.'
         text = pytesseract.image_to_string(pil_img, config=custom_config)
         logging.info(f"Tesseract raw output: {text}")
 
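In the new config, --oem 3 selects the default engine, --psm 7 treats the input as a single text line, and the whitelist limits output to digits and the decimal point. The confidence attached in the next hunk is a fixed heuristic; if genuine per-word engine confidences were wanted, pytesseract's image_to_data could supply them (a sketch of an alternative, not what this commit does):

    import pytesseract
    from pytesseract import Output

    def tesseract_confidence(pil_img,
                             config=r'--oem 3 --psm 7 -c tessedit_char_whitelist=0123456789.'):
        data = pytesseract.image_to_data(pil_img, config=config, output_type=Output.DICT)
        confs = [int(c) for c in data['conf'] if int(c) >= 0]  # -1 marks non-text boxes
        return sum(confs) / len(confs) if confs else 0.0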
@@ -224,7 +238,7 @@ def perform_ocr(img, roi_bbox):
             text = text.strip('.')
             if text and re.fullmatch(r"^\d*\.?\d*$", text):
                 text = text.lstrip('0') or '0'
-                confidence =
+                confidence = 95.0 if len(text.replace('.', '')) >= 3 else 90.0
                 logging.info(f"Validated Tesseract text: {text}, Confidence: {confidence:.2f}%")
                 return text, confidence
 
@@ -234,7 +248,7 @@ def perform_ocr(img, roi_bbox):
         digits_info = []
         for c in contours:
             x, y, w, h = cv2.boundingRect(c)
-            if w >
+            if w > 5 and h > 6 and 0.05 <= w/h <= 3.0:
                 digits_info.append((x, x+w, y, y+h))
 
         if digits_info:
@@ -251,7 +265,7 @@ def perform_ocr(img, roi_bbox):
                 digit = detect_digit_template(digit_crop, brightness)
                 if digit:
                     recognized_text += digit
-                elif x_min - prev_x_max <
+                elif x_min - prev_x_max < 8 and prev_x_max != -float('inf'):
                     recognized_text += '.'
                 prev_x_max = x_max
 
@@ -261,7 +275,7 @@ def perform_ocr(img, roi_bbox):
             text = text.strip('.')
             if text and re.fullmatch(r"^\d*\.?\d*$", text):
                 text = text.lstrip('0') or '0'
-                confidence =
+                confidence = 90.0 if len(text.replace('.', '')) >= 3 else 85.0
                 logging.info(f"Validated template text: {text}, Confidence: {confidence:.2f}%")
                 return text, confidence
 
@@ -279,17 +293,17 @@ def extract_weight_from_image(pil_img):
         save_debug_image(img, "00_input_image")
         img = correct_rotation(img)
         brightness = estimate_brightness(img)
-        conf_threshold = 0.
+        conf_threshold = 0.7 if brightness > 80 else 0.5
 
         roi_img, roi_bbox = detect_roi(img)
         if roi_bbox:
-            conf_threshold *= 1.
+            conf_threshold *= 1.1 if (roi_bbox[2] * roi_bbox[3]) > (img.shape[0] * img.shape[1] * 0.1) else 1.0
 
         result, confidence = perform_ocr(roi_img, roi_bbox)
         if result and confidence >= conf_threshold * 100:
             try:
                 weight = float(result)
-                if 0.
+                if 0.001 <= weight <= 2000:
                     logging.info(f"Detected weight: {result} kg, Confidence: {confidence:.2f}%")
                     return result, confidence
                 logging.warning(f"Weight {result} out of range.")
@@ -298,10 +312,10 @@ def extract_weight_from_image(pil_img):
 
         logging.info("Primary OCR failed, using full image fallback.")
         result, confidence = perform_ocr(img, None)
-        if result and confidence >= conf_threshold * 0.
+        if result and confidence >= conf_threshold * 0.9 * 100:
             try:
                 weight = float(result)
-                if 0.
+                if 0.001 <= weight <= 2000:
                     logging.info(f"Full image weight: {result} kg, Confidence: {confidence:.2f}%")
                     return result, confidence
                 logging.warning(f"Full image weight {result} out of range.")
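End to end, the module takes a PIL image and returns the recognized weight string plus a confidence percentage. A minimal usage sketch ("scale_photo.jpg" is a placeholder path; the failure return value is assumed, since it falls outside the shown hunks):

    from PIL import Image
    from ocr_engine import extract_weight_from_image

    weight_text, confidence = extract_weight_from_image(Image.open("scale_photo.jpg"))
    if weight_text:  # assumed falsy (e.g. None) when nothing was detected
        print(f"{weight_text} kg at {confidence:.1f}% confidence")
    else:
        print("no weight detected")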