import os
import re
import time
import traceback
from datetime import datetime

from paddleocr import PaddleOCR, TextDetection
import numpy as np
from PIL import ImageGrab, Image
import cv2
from loguru import logger
from imutils.object_detection import non_max_suppression


class SingletonMeta(type):
    """Metaclass that turns every class using it into a singleton.

    The first call to the class constructs the instance; every later call
    returns that same cached instance (constructor arguments are ignored
    after the first construction).
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: a cache hit is the common case after the first call.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance


class VisionProcess(metaclass=SingletonMeta):
    """Singleton helper combining screen capture, PaddleOCR text recognition
    and OpenCV template matching.

    All coordinates passed to the public methods are absolute screen
    coordinates; returned center points are mapped back to the same space.
    """

    def __init__(self):
        # PP-OCRv5 server-grade detection + recognition models, on GPU.
        self.ocr = PaddleOCR(
            text_detection_model_name="PP-OCRv5_server_det",
            # Raw string: the original literal relied on "\D"/"\p" being
            # invalid escapes that CPython leaves untouched (with a
            # DeprecationWarning); the runtime value is unchanged.
            text_detection_model_dir=r"D:\Dev\pabddle_module_dir\PP-OCRv5_server_det_infer",
            text_recognition_model_name="PP-OCRv5_server_rec",
            use_doc_orientation_classify=False,
            use_doc_unwarping=False,
            use_textline_orientation=False,
            device="gpu:0"
            # enable_hpi=True
        )  # PP-OCRv5_server models for text detection + recognition

    @staticmethod
    def _get_processed_image(x1, y1, x2, y2, expand=False, scale=1, save_screenshot=False):
        """Capture the screen region and return it as a numpy RGB array.

        Args:
            x1, y1: top-left corner of the capture region.
            x2, y2: bottom-right corner of the capture region.
            expand: paste the capture centered on a black canvas ``scale``
                times larger in each dimension (gives the OCR detector
                margin around small regions).
            scale: enlargement factor used when ``expand`` is True.
            save_screenshot: persist the (possibly expanded) image under
                ./temp with a timestamped filename.

        Returns:
            numpy.ndarray: the captured (and optionally expanded) image.
        """
        img = ImageGrab.grab(bbox=(x1, y1, x2, y2))

        if expand:
            original_width, original_height = img.size
            new_width = int(original_width * scale)
            new_height = int(original_height * scale)
            expanded_img = Image.new("RGB", (new_width, new_height), (0, 0, 0))
            # Center the capture on the enlarged black canvas.
            paste_x = (new_width - original_width) // 2
            paste_y = (new_height - original_height) // 2
            expanded_img.paste(img, (paste_x, paste_y))
            img = expanded_img

        # Bug fix: saving used to happen only on the expanded path, so callers
        # passing save_screenshot=True with expand=False (e.g. find_text's
        # failure path) never got a screenshot.
        if save_screenshot:
            os.makedirs("./temp", exist_ok=True)
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"./temp/screenshot_{x1}_{y1}_{x2}_{y2}_{timestamp}.png"
            img.save(filename)
            # Bug fix: the original message never interpolated the path.
            logger.info(f"截图已保存到: {filename}")

        return np.array(img)

    def get_ocr_results(self, x1, y1, x2, y2, expand=False, scale=1, save_screenshot=False):
        """Run OCR over a screen region and return (center, text) pairs.

        Args:
            x1, y1: top-left corner of the region.
            x2, y2: bottom-right corner of the region.
            expand: whether to expand the capture before OCR.
            scale: expansion factor when ``expand`` is True.
            save_screenshot: whether to save the captured image.

        Returns:
            list: tuples of ((center_x, center_y), recognized_text), with
            centers in absolute screen coordinates.
        """
        ocr_start = time.time()
        image_data = self._get_processed_image(
            x1, y1, x2, y2, expand=expand, scale=scale, save_screenshot=save_screenshot
        )
        predict = self.ocr.predict(image_data)

        # When the image was expanded, detected boxes are shifted by the
        # black padding added around the original capture; compute that
        # offset so results map back to absolute screen coordinates.
        offset_x, offset_y = 0, 0
        if expand:
            original_width = x2 - x1
            original_height = y2 - y1
            offset_x = (int(original_width * scale) - original_width) // 2
            offset_y = (int(original_height * scale) - original_height) // 2

        results = []
        for i in range(len(predict[0]['dt_polys'])):
            try:
                # rec_boxes entries are [x1, y1, x2, y2] in image coordinates.
                rec_box = predict[0]['rec_boxes'][i]

                # Box center, shifted back by the expansion padding and
                # translated into absolute screen coordinates.
                center_x = (rec_box[0] + rec_box[2]) / 2 - offset_x + x1
                center_y = (rec_box[1] + rec_box[3]) / 2 - offset_y + y1

                rec_text = predict[0]['rec_texts'][i]

                results.append(((center_x, center_y), rec_text))
            except (IndexError, KeyError) as e:
                # Detection and recognition lists can disagree in length;
                # skip unresolvable entries instead of failing the batch.
                logger.error(f"处理OCR结果时出错: {e}")
                continue
        logger.info(f"获取区域 ({x1}, {y1}, {x2}, {y2}) 耗时: {time.time() - ocr_start:.2f} 秒\n获取结果如下：\n{results}")
        return results

    def find_text(self, text, x1, y1, x2, y2, save=False):
        """Locate ``text`` inside the region and return its center.

        Matching is a simple substring test; the best match is the detected
        line with the highest len(text)/len(detected) ratio.

        Args:
            text: text to look for.
            x1, y1, x2, y2: search region (absolute screen coordinates).
            save: save a screenshot of the region when nothing is detected.

        Returns:
            tuple: (center_x, center_y, confidence); (-1, -1, 0) when the
            text was not found.
        """
        logger.debug(f"查找文本: {text} 在区域 ({x1}, {y1}, {x2}, {y2})")
        time.sleep(0.5)

        ocr_results = self.get_ocr_results(x1, y1, x2, y2)

        if not ocr_results:
            logger.info(f"未找到文本 {text}")
            if save:
                # Keep a capture of the empty region for debugging.
                self._get_processed_image(x1, y1, x2, y2, save_screenshot=True)
            return -1, -1, 0

        best_match = None
        highest_confidence = 0

        for (center_x, center_y), detected_text in ocr_results:
            # Substring match; the closer the detected line's length is to
            # the query's, the higher the confidence.
            if text in detected_text:
                confidence = len(text) / len(detected_text)
                if confidence > highest_confidence:
                    highest_confidence = confidence
                    best_match = (center_x, center_y)

        if best_match:
            center_x, center_y = best_match
            logger.info(f"找到文本 {text}，坐标: ({center_x}, {center_y})，匹配度: {highest_confidence:.2f}")
            return int(center_x), int(center_y), highest_confidence
        else:
            logger.info(f"未找到符合条件的文本 {text}, OCR结果: {ocr_results}")
            return -1, -1, 0

    def find_all_images(self, template_path, x1, y1, x2, y2, threshold=0.8, min_dist=10, show=False, save_path=None):
        """Find every on-screen occurrence of a template image in a region.

        Args:
            template_path: path of the template image file.
            x1, y1, x2, y2: search region (absolute screen coordinates).
            threshold: minimum normalized match score (TM_CCOEFF_NORMED).
            min_dist, show, save_path: accepted for interface compatibility;
                currently unused.

        Returns:
            list: (center_x, center_y) of each match after NMS de-duplication,
            in absolute screen coordinates.
        """
        screenshot_cv = cv2.cvtColor(self._get_processed_image(x1, y1, x2, y2), cv2.COLOR_RGB2BGR)
        template = cv2.imread(template_path, cv2.IMREAD_COLOR)
        if template is None:
            # Bug fix: cv2.imread returns None (no exception) for a missing
            # or unreadable file; fail loudly instead of crashing inside
            # matchTemplate with a cryptic assertion.
            logger.error(f"无法读取模板图片: {template_path}")
            return []

        result = cv2.matchTemplate(screenshot_cv, template, cv2.TM_CCOEFF_NORMED)
        h, w = template.shape[:2]

        # All positions scoring at or above the threshold.
        y_coords, x_coords = np.where(result >= threshold)

        # Convert to absolute-screen rectangles (startX, startY, endX, endY).
        rects = []
        for x, y in zip(x_coords, y_coords):
            start_x = x + x1
            start_y = y + y1
            rects.append((start_x, start_y, start_x + w, start_y + h))

        # Collapse overlapping detections with non-maximum suppression.
        pick = non_max_suppression(np.array(rects), probs=None, overlapThresh=0.3)

        # Center of each surviving rectangle. Distinct names avoid shadowing
        # the x1/y1/x2/y2 parameters as the original comprehension did.
        return [((sx + ex) // 2, (sy + ey) // 2) for (sx, sy, ex, ey) in pick]

    def get_run_time(self, x1=897, y1=0, x2=1015, y2=27, scale=2):
        """Read an on-screen elapsed timer formatted as "MM:SS".

        Args:
            x1, y1, x2, y2: region where the timer is displayed.
            scale: expansion factor passed to OCR (small text benefits from
                the extra margin).

        Returns:
            tuple: (time_str, total_seconds) on success; (0, 60) when no
            valid "MM:SS" text is found or OCR fails (note: the first
            element is then the int 0, not a string — callers rely on this).
        """
        try:
            time.sleep(0.5)

            ocr_results = self.get_ocr_results(x1, y1, x2, y2, expand=True, scale=scale)

            if not ocr_results:
                logger.info("未找到时间")
                return 0, 60

            # Look for the first text shaped like "digits:digits".
            for _, detected_text in ocr_results:
                if re.match(r'\d+:\d+', detected_text):
                    parts = detected_text.split(':')
                    if len(parts) == 2:
                        try:
                            minutes = int(parts[0])
                            seconds = int(parts[1])
                            total_seconds = minutes * 60 + seconds
                            logger.info(f"找到运行时间: {detected_text} ({total_seconds}秒)")
                            return detected_text, total_seconds
                        except ValueError:
                            # Trailing garbage after the digits; try the next line.
                            continue

            logger.info(f"未找到时间格式，OCR结果: {ocr_results}")
            return 0, 60

        except Exception as e:
            # Best-effort: timer reading must never crash the caller.
            logger.error(f"get_run_time错误: {e}")
            return 0, 60

    def get_all_coordinates_and_text(self, x1, y1, x2, y2, expand=False, scale=1, save_screenshot=False):
        """Return every OCR hit in the region as flat (x, y, text) triples.

        Same parameters as :meth:`get_ocr_results`; returns [] on any error.
        """
        try:
            results = self.get_ocr_results(x1, y1, x2, y2, expand=expand, scale=scale, save_screenshot=save_screenshot)
            return [(x, y, z) for ((x, y), z) in results]

        except Exception as e:
            # Best-effort wrapper: log the full traceback, never raise.
            error_info = traceback.format_exc()
            logger.error(f"get_all_coordinates_and_text错误: {e}\n{error_info}")
            return []
