from io import BytesIO
import re
import numpy as np
import cv2
import ddddocr
from PIL import Image, ImageDraw, ImageFont, ImageEnhance

class Ddddocr:
    """CAPTCHA OCR helper built on ddddocr.

    Provides whole-image recognition, per-character detection with
    preprocessing (margin crop + upscale, sharpen/contrast, binarize +
    morphological close), a small confusion table for commonly misread
    glyphs, and a visualizer that draws results back onto the image.
    """

    # Confusion table: if the first-pass OCR yields the key and the
    # full-candidate pass offers one of the listed alternatives, the
    # alternative is preferred (e.g. "文" is often a misread "实").
    AMBIG = {
        "文": ["实"],
        "者": ["着"],
    }

    def __init__(self, font_path: str = "./msyhl.ttc", font_size: int = 20):
        """Initialize the classifier, the detector, and the label font.

        Args:
            font_path: TrueType/TTC font file used to draw labels.
            font_size: Point size of the label font.
        """
        self.ocr = ddddocr.DdddOcr(show_ad=False)                # character classifier
        self.det_ocr = ddddocr.DdddOcr(det=True, show_ad=False)  # bounding-box detector
        self.font = ImageFont.truetype(font_path, font_size)

    def to_grayscale(self, image_bytes: bytes) -> bytes:
        """Convert raw image bytes to single-channel ("L" mode) PNG bytes."""
        img = Image.open(BytesIO(image_bytes)).convert("L")
        buf = BytesIO()
        img.save(buf, format="PNG")
        return buf.getvalue()

    def crop_with_margin(self, img: Image.Image, box: tuple[int,int,int,int], margin: int = 8, scale: float = 1.5) -> Image.Image:
        """Crop ``box`` from ``img`` with extra margin, then upscale.

        The margin is clamped to the image bounds so near-edge boxes do
        not overflow; the crop is enlarged by ``scale`` with Lanczos
        resampling to give the OCR model more pixels to work with.
        """
        x1, y1, x2, y2 = box
        left = max(x1 - margin, 0)
        top = max(y1 - margin, 0)
        right = min(x2 + margin, img.width)
        bottom = min(y2 + margin, img.height)
        region = img.crop((left, top, right, bottom))
        new_size = (int(region.width * scale), int(region.height * scale))
        return region.resize(new_size, Image.LANCZOS)

    def enhance_image(self, pil_img: Image.Image) -> Image.Image:
        """Sharpen (2.0x) then boost contrast (1.5x) before binarization."""
        sharp = ImageEnhance.Sharpness(pil_img).enhance(2.0)
        return ImageEnhance.Contrast(sharp).enhance(1.5)

    def binarize_and_close(self, pil_img: Image.Image, thresh: int = 140, kernel_size: int = 2) -> Image.Image:
        """Binarize the image and apply a morphological close.

        Inverted thresholding makes dark strokes the foreground, the
        close fills small gaps inside strokes, and a final inversion
        restores dark-text-on-white for the OCR model.

        Args:
            thresh: Grayscale threshold (0-255).
            kernel_size: Side length of the rectangular closing kernel.
        """
        gray = np.array(pil_img.convert("L"))
        _, bw = cv2.threshold(gray, thresh, 255, cv2.THRESH_BINARY_INV)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_size, kernel_size))
        closed = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
        closed = cv2.bitwise_not(closed)
        return Image.fromarray(closed)

    def correct_ambiguous(self, text: str, img_bytes: bytes) -> str:
        """Second-pass correction for easily confused characters.

        Runs the (relatively expensive) candidate pass only when ``text``
        is in the confusion table. On any OCR failure the first-pass text
        is returned unchanged — correction is best-effort by design.
        """
        if text not in self.AMBIG:
            # Fast path: skip the extra OCR pass for unambiguous text.
            return text
        try:
            preds = self.ocr.classification_all(img_bytes)
            for alt in self.AMBIG[text]:
                if alt in preds:
                    return alt
        except Exception:
            # Best-effort: keep the first-pass result on any failure.
            pass
        return text

    def recognize_text(self, image_bytes: bytes) -> str:
        """Recognize the whole image in one shot; returns "" on failure."""
        try:
            gray = self.to_grayscale(image_bytes)
            return self.ocr.classification(gray)
        except Exception as e:
            print(f"[recognize_text] 错误: {e}")
            return ""

    def recognize_text_with_position(self, image_bytes: bytes) -> list[dict]:
        """Detect and recognize each character with its bounding box.

        Per detected box: margin crop + upscale, sharpen and contrast
        boost, binarize + close, OCR, strip non-CJK characters, then
        apply the confusion-table correction.

        Returns:
            A list of dicts with keys ``text``, ``position`` (x1, y1,
            x2, y2) and ``center``; an empty list on failure.
        """
        try:
            gray = self.to_grayscale(image_bytes)
            img = Image.open(BytesIO(gray))
            # Sort boxes top-to-bottom, then left-to-right.
            boxes = sorted(self.det_ocr.detection(gray), key=lambda b: (b[1], b[0]))

            results = []
            for box in boxes:
                # 1. Add margin & upscale
                region = self.crop_with_margin(img, box)
                # 2. Sharpen & boost contrast
                region = self.enhance_image(region)
                # 3. Binarize & morphological close
                region = self.binarize_and_close(region)
                # 4. OCR & drop anything that is not a CJK ideograph
                buf = BytesIO()
                region.save(buf, 'PNG')
                img_b = buf.getvalue()
                text = self.ocr.classification(img_b)
                text = re.sub(r"[^\u4e00-\u9fa5]", "", text)
                # 5. Second-pass correction of confusable glyphs
                text = self.correct_ambiguous(text, img_b)

                results.append({
                    "text": text,
                    "position": box,
                    "center": ((box[0] + box[2]) // 2, (box[1] + box[3]) // 2)
                })
            return results
        except Exception as e:
            print(f"[recognize_text_with_position] 错误: {e}")
            return []

    def visualize_results(self, image_bytes: bytes, results: list[dict]) -> "Image.Image | None":
        """Draw boxes and recognized text onto an RGB copy of the image.

        Returns:
            The annotated image, or None if drawing fails.
        """
        try:
            img = Image.open(BytesIO(image_bytes)).convert("RGB")
            draw = ImageDraw.Draw(img)
            for item in results:
                x1, y1, x2, y2 = item["position"]
                draw.rectangle([x1, y1, x2, y2], outline="red", width=1)
                # Clamp the label y so boxes near the top edge keep
                # their label on the canvas instead of drawing off-image.
                draw.text((x1, max(y1 - 18, 0)), item["text"], font=self.font, fill="blue")
            return img
        except Exception as e:
            print(f"[visualize_results] 错误: {e}")
            return None

# Usage example
if __name__ == "__main__":
    recognizer = Ddddocr()
    with open("captcha.png", "rb") as fh:
        captcha_bytes = fh.read()

    print("整图识别结果:", recognizer.recognize_text(captcha_bytes))
    detections = recognizer.recognize_text_with_position(captcha_bytes)
    for entry in detections:
        print(entry)
    annotated = recognizer.visualize_results(captcha_bytes, detections)
    if annotated is not None:
        annotated.show()
