import pyautogui
import win32gui
import cv2
import numpy as np
import time
from PIL import Image
import logging
import os
# Disable ccache (presumably to avoid Paddle compile-cache issues — TODO confirm).
os.environ['CCACHE_DISABLE'] = '1'
logging.disable(logging.CRITICAL)  # Completely disable all logging
logging.getLogger().setLevel(logging.ERROR)
logging.getLogger('ppocr').setLevel(logging.ERROR)
# Imported after the logging configuration above so PaddleOCR's loggers stay quiet.
from paddleocr import PaddleOCR

class WindowCaptureAndSearch:
    """Locate a desktop window by title, capture it, and run template
    matching / OCR on the captures.

    Windows-only: uses win32gui for client-area geometry and PaddleOCR for
    text recognition.
    """

    def __init__(self, window_title):
        """
        Find and store the target window at construction time.

        :param window_title: substring to match against window titles
        """
        self.window_title = window_title
        self.target_window = None
        # Chinese OCR model; PaddleOCR's 'ch' models also read Latin text.
        self.reader = PaddleOCR(lang='ch')
        # PaddleOCR's constructor may reconfigure its logger; lower it again here.
        logging.getLogger('ppocr').setLevel(logging.WARNING)
        self._find_target_window()

    def _find_target_window(self):
        """
        Find the first window whose title contains self.window_title and
        cache it on self.target_window.

        Also records the screen coordinates of the client area's top-left
        corner in self.tl_x / self.tl_y (set only when a window is found).
        """
        # Enumerate all top-level windows.
        windows = pyautogui.getAllWindows()

        for window in windows:
            if window.title == '':
                continue
            if self.window_title in window.title:
                self.target_window = window
                _hwnd = window._hWnd
                # The client rect is in client coordinates; map its origin
                # into screen space.
                client_rect = win32gui.GetClientRect(_hwnd)
                top_left = win32gui.ClientToScreen(_hwnd, (client_rect[0], client_rect[1]))
                self.tl_x, self.tl_y = top_left
                break

        if not self.target_window:
            print(f"窗口标题 '{self.window_title}' 未找到。")

    def activate_window(self):
        """
        Bring the target window to the foreground.

        :return: False only when no window is stored; True otherwise —
            even on activation failure the method pauses and reports True
            so the caller can proceed (user may raise the window manually).
        """
        if not self.target_window:
            print("目标窗口未找到，无法激活。")
            return False

        try:
            self.target_window.activate()
            time.sleep(0.5)  # Give the OS time to raise the window.
            return True
        except Exception as e:
            print(f"激活窗口时出错: {e}")
            print('请确保窗口标题正确且窗口处于激活状态。')
            # Deliberate best-effort: pause, then continue as if activated.
            time.sleep(5)
            return True

    def take_screenshot(self, region=None):
        """
        Capture a region of the target window.

        :param region: window-relative area as [left, top, width, height];
            defaults to [0, 0, 1920, 1080] (full 1080p client area)
        :return: the capture as a PIL Image, or None on failure
        """
        if region is None:
            region = [0, 0, 1920, 1080]

        if not self.target_window:
            print("目标窗口未找到，无法截取屏幕。")
            return None

        # Bring the window forward before capturing.
        if not self.activate_window():
            return None

        # Window origin in screen coordinates. The +8/+31 offsets presumably
        # skip the default Windows frame border and title bar — TODO confirm;
        # self.tl_x/self.tl_y hold the exact client-area origin.
        left, top, width, height = self.target_window.left + 8, self.target_window.top + 31, self.target_window.width, self.target_window.height
        # left, top, width, height = self.target_window.left, self.target_window.top, self.target_window.width, self.target_window.height

        # region is never None here (defaulted above), so this branch is
        # always taken; width/height cover the hypothetical fallback only.
        if region is not None and isinstance(region, list) and len(region) == 4:
            r_left, r_top, r_width, r_height = region
            screenshot = pyautogui.screenshot(region=(left + r_left, top + r_top, r_width, r_height))
        else:
            screenshot = pyautogui.screenshot(region=(left, top, width, height))

        # Uncomment to preview the capture while debugging.
        # screenshot.show()

        return screenshot

    def find_image(self, image_path, confidence=0.8, search_region=None):
        """
        Search for a template image inside the target window.

        :param image_path: path to the template image on disk
        :param confidence: minimum normalized-correlation score, default 0.8
        :param search_region: window-relative [left, top, width, height];
            None means the whole window
        :return: window-relative center (x, y) of the best match, or None
        """
        screenshot = self.take_screenshot(region=search_region)
        if screenshot is None:
            return None

        screenshot_np = np.array(screenshot)
        # PIL arrays are RGB; swap to BGR so the capture matches the BGR
        # template cv2.imread produces below.
        screenshot_bgr = cv2.cvtColor(screenshot_np, cv2.COLOR_RGB2BGR)

        # Load the template (BGR).
        template = cv2.imread(image_path)
        if template is None:
            print(f"图片路径 '{image_path}' 未找到或无法读取。")
            return None

        # Normalized cross-correlation template matching.
        result = cv2.matchTemplate(screenshot_bgr, template, cv2.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

        # Accept only matches at or above the confidence threshold.
        if max_val >= confidence:
            # Center of the best match, relative to the captured region.
            h, w = template.shape[:2]
            relative_center_x = max_loc[0] + w // 2
            relative_center_y = max_loc[1] + h // 2

            # Shift back into whole-window coordinates when a sub-region
            # was captured.
            if search_region is not None and isinstance(search_region, list) and len(search_region) == 4:
                relative_center_x += search_region[0]
                relative_center_y += search_region[1]

            return (relative_center_x, relative_center_y)
        else:
            print("在指定窗口中未找到图片。")
            return None

    def crop_image(self, image, region):
        """
        Crop a region out of a PIL image.

        :param image: input PIL Image
        :param region: [x, y, w, h] crop rectangle
        :return: cropped PIL Image
        """
        # Convert the PIL Image to a numpy array for slicing.
        open_cv_image = np.array(image)

        # RGB -> BGR for OpenCV convention (the round-trip is cosmetic:
        # slicing alone would not require a color conversion).
        open_cv_image = cv2.cvtColor(open_cv_image, cv2.COLOR_RGB2BGR)

        x, y, w, h = region

        # Crop via numpy slicing (rows = y, cols = x).
        cropped_image = open_cv_image[y:y+h, x:x+w]

        # Convert back to RGB and wrap in a PIL Image.
        cropped_pil_image = Image.fromarray(cv2.cvtColor(cropped_image, cv2.COLOR_BGR2RGB))

        return cropped_pil_image

    def ocr_screenshot(self, screenshot=None, region=None, only_text=0):
        """
        OCR a region of the target window (or a caller-supplied image).

        :param screenshot: optional pre-captured PIL Image; when None a fresh
            capture of `region` is taken
        :param region: [left, top, width, height]; capture region when
            screenshot is None, otherwise a crop rectangle applied to it
        :param only_text: 0 -> plain list of recognized strings; any other
            value -> list of dicts with 'text' / 'confidence' / 'center'
        :return: recognized text in the format selected by only_text
        """
        if screenshot is None:
            screenshot = self.take_screenshot(region=region)
        elif region is not None:
            # Only crop caller-supplied images: a fresh capture above is
            # already restricted to `region`. (The old code cropped twice,
            # which emptied the image for regions with a non-zero offset.)
            screenshot = self.crop_image(screenshot, region)
        if screenshot is None:
            # Capture failed; return the same sentinel as "nothing found".
            return [{
                'text': 'None',
                'confidence': 0,
                'center': [0, 0]
            }]
        # Run PaddleOCR; [0] selects the first (only) page of results.
        results = self.reader.ocr(np.array(screenshot))[0]
        # PaddleOCR returns None when no text is recognized.
        if results is None:
            return [{
                'text': 'None',
                'confidence': 0,
                'center': [0, 0]
            }]
        if only_text == 0:
            return [r[1][0] for r in results]
        # Each result r is [corner_points, (text, confidence)] — legacy
        # PaddleOCR layout; verify against the installed version.
        extracted_results = []
        for r in results:
            text = r[1][0]
            confidence = r[1][1]
            rectangle_points = r[0]
            # Box center = mean of the four corner points.
            center = (int(sum(point[0] for point in rectangle_points) / 4),
                      int(sum(point[1] for point in rectangle_points) / 4))
            extracted_results.append({
                'text': text,
                'confidence': confidence,
                'center': center
            })

        return extracted_results
    
def test(image_path, confidence):
    """
    Smoke-test helper: template-match an image, OCR the search region, then
    keep capturing screenshots once per second (never returns).

    Relies on module-level `finder` and `search_region` (see __main__ block).

    :param image_path: path of the template image to search for
    :param confidence: template-match confidence threshold
    """
    # Template search.
    position = finder.find_image(image_path, confidence, search_region)
    if position:
        print(f"图片在窗口中的相对位置: {position}")
    else:
        print("图片未找到。")

    # OCR: request structured results (only_text=1) so each entry is a dict.
    # (The old code indexed the returned list with ['results'] and read a
    # nonexistent 'position' key — both crashed; the real key is 'center'.)
    ocr_result = finder.ocr_screenshot(region=search_region, only_text=1)
    if ocr_result:
        for result in ocr_result:
            print(f"文字: {result['text']}, 置信度: {result['confidence']}, 位置: {result['center']}")
    else:
        print("OCR 识别失败。")

    # Capture loop: retries forever, reporting any capture failure.
    while True:
        time.sleep(1)
        screenshot = finder.take_screenshot(region=search_region)
        if screenshot is None:
            print("截图失败。")
            continue

if __name__ == "__main__":
    search_region = [0, 0, 1920, 1080]  # search area, format [left, top, width, height]
    finder = WindowCaptureAndSearch("The First Descendant")
    # Full-window capture, then a small bottom-right sub-region.
    finder.take_screenshot()
    finder.take_screenshot(region=[1800, 900, 120, 180])
    # Keep the process (and the activated window) alive.
    time.sleep(999)