import cv2
import numpy as np
import easyocr

class AiUtil(object):
    """Image-recognition helpers: SIFT-based template location and OCR text search.

    Exposed as a process-wide singleton via ``AiUtil.instance()``.
    """

    # Shared EasyOCR reader (simplified Chinese + English). Created lazily on
    # first access instead of at import time, because loading the model is
    # slow and memory-hungry; it still runs only once per process.
    _reader = None

    def __init__(self):
        pass

    @classmethod
    def instance(cls, *args, **kwargs):
        """Return the singleton instance, creating it on the first call."""
        if not hasattr(AiUtil, "_instance"):
            AiUtil._instance = AiUtil(*args, **kwargs)
        return AiUtil._instance

    @property
    def reader(self) -> "easyocr.Reader":
        """The shared EasyOCR reader, loading the model on first access."""
        if AiUtil._reader is None:
            # this needs to run only once to load the model into memory
            AiUtil._reader = easyocr.Reader(['ch_sim', 'en'])
        return AiUtil._reader

    # Locate a template image inside a source image via SIFT feature matching.
    @staticmethod
    def find_sift(source_image_path, search_image_path):
        """Find ``search_image_path`` inside ``source_image_path`` using SIFT.

        Returns a dict with:
          * ``center``    -- (x, y) integer center of the matched region
          * ``rectangle`` -- the four projected corner points [(x, y), ...]

        Raises:
          FileNotFoundError: if either image cannot be read.
          ValueError: if there are too few matches to compute a homography,
            or homography estimation fails.
        """
        # Read both images as grayscale (flag 0). cv2.imread returns None on
        # failure instead of raising, so check explicitly.
        source_image = cv2.imread(source_image_path, 0)
        template_image = cv2.imread(search_image_path, 0)
        if source_image is None:
            raise FileNotFoundError(f"cannot read image: {source_image_path}")
        if template_image is None:
            raise FileNotFoundError(f"cannot read image: {search_image_path}")

        # Detect keypoints and compute SIFT descriptors for both images.
        sift = cv2.SIFT_create()
        kp1, des1 = sift.detectAndCompute(source_image, None)
        kp2, des2 = sift.detectAndCompute(template_image, None)
        # detectAndCompute returns None descriptors for featureless images.
        if des1 is None or des2 is None:
            raise ValueError("no SIFT features detected in one of the images")

        # Brute-force matching with cross-check (mutual best matches only).
        bf = cv2.BFMatcher(crossCheck=True)
        matches = bf.match(des1, des2)

        # findHomography needs at least 4 point correspondences.
        if len(matches) < 4:
            raise ValueError("not enough SIFT matches to locate the template")

        # Sort by descriptor distance: smaller is better.
        matches = sorted(matches, key=lambda m: m.distance)

        # Matched keypoint coordinates: template (train) -> source (query).
        src_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)

        # Robustly estimate the homography mapping template coordinates into
        # the source image; RANSAC rejects outlier matches.
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if M is None:
            raise ValueError("homography estimation failed")

        # Project the template's four corners into the source image's
        # coordinate system.
        h, w = template_image.shape
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)

        # Center of the projected quadrilateral in the source image.
        center_x = np.mean(dst[:, 0, 0])
        center_y = np.mean(dst[:, 0, 1])
        center_point = (int(center_x), int(center_y))

        # Corner points as plain Python tuples: [(x, y), (x1, y1), ...]
        pypts = [tuple(corner[0]) for corner in dst.astype(int).tolist()]

        result = dict(
            center=center_point,
            rectangle=pypts,
        )

        print(f"find_sift result = {result}")
        return result

    # Run OCR on an image and return the raw EasyOCR results.
    @staticmethod
    def ocr_image_text(image_path):
        """Return EasyOCR results for ``image_path``.

        Each item is ``(bounding_box, text, confidence)`` as produced by
        ``easyocr.Reader.readtext``.
        """
        ocr_result = AiUtil.instance().reader.readtext(image_path)
        return ocr_result

    # Find the OCR item whose recognized text equals one of the given strings.
    @staticmethod
    def find_text(image_path, search_text=None, search_texts=None):
        """Return the first OCR item whose text exactly equals ``search_text``
        or any entry of ``search_texts``; ``None`` if nothing matches.

        Note: previously ``search_texts`` was silently ignored whenever
        ``search_text`` was also supplied; both are now honored together.
        """
        # Collect all target strings into a set for O(1) membership tests.
        targets = set(search_texts or [])
        if search_text is not None:
            targets.add(search_text)
        for item in AiUtil.ocr_image_text(image_path):
            # item = (bounding_box, recognized_text, confidence)
            if item[1] in targets:
                return item
        return None
