import easyocr
import numpy as np
import cv2
import re
reader = easyocr.Reader(['en'], gpu=False)
def extract_weight_from_image(pil_img):
    try:
        img = np.array(pil_img)

        # Resize image for consistency
        if img.shape[1] > 1000:
            img = cv2.resize(img, (1000, int(img.shape[0] * 1000 / img.shape[1])))

        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

        # Upscale image
        gray = cv2.resize(gray, None, fx=4, fy=4, interpolation=cv2.INTER_LINEAR)

        # Histogram equalization and slight blur
        gray = cv2.equalizeHist(gray)
        blurred = cv2.GaussianBlur(gray, (3, 3), 0)

        # Adaptive threshold
        thresh = cv2.adaptiveThreshold(blurred, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 11, 2)

        # Invert if needed (keep the image mostly white so digits stay dark)
        white_ratio = np.mean(thresh > 127)
        if white_ratio < 0.5:
            thresh = cv2.bitwise_not(thresh)

        # OCR
        result = reader.readtext(thresh, detail=0)
        print("🧠 OCR Raw Output:", result)
        combined_text = " ".join(result).strip()

        # Extract number (up to 4 digits, optional 2 decimal places)
        match = re.search(r"(\d{1,4}(?:\.\d{1,2})?)", combined_text)
        if match:
            weight = match.group(1)
            return f"{weight} kg", 100.0
        else:
            return "No weight detected kg", 0.0

    except Exception as e:
        print("❌ OCR Error:", e)
        return f"Error: {str(e)}", 0.0