import ctypes
import io
import threading

import pyautogui
import ddddocr
import winsound
import pygetwindow as gw

import time
import logging
from PIL import Image
import numpy as np
import cv2
import random
import rembg  # 确保已经安装rembg库
from PyQt5.QtCore import QByteArray, QBuffer
from ultralytics import YOLO

# Logging configuration: the root logger emits DEBUG-level records to both
# a log file and the console.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)

# File handler (NOTE: opens game_log.txt in the working directory at import time).
file_handler = logging.FileHandler('game_log.txt')
file_handler.setLevel(logging.DEBUG)

# Console handler.
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)

# Shared record format, e.g. "2024-01-01 12:00:00 - INFO - message".
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)

# Attach both handlers to the root logger.
logger.addHandler(file_handler)
logger.addHandler(console_handler)


class Game:
    """Automation helper for a rotation-verification mini-game.

    Locates UI buttons with a YOLO detector, reads numbers with ddddocr OCR,
    isolates the puzzle icon with rembg/OpenCV, finds the best rotation by
    template matching, and signals the operator with winsound notifications.
    """

    class Button:
        """A UI button located by object detection."""

        def __init__(self, cls_index, cls_text=None, box=None):
            self.box = box  # bounding box as (x1, y1, x2, y2); None until detected
            self.cls_index = cls_index  # YOLO class index of this button
            self.cls_text = cls_text  # optional human-readable class label

        def click(self):
            # Placeholder click handler; the real click is performed elsewhere.
            print("点击事件")

        def pos(self):
            """Return the integer (x, y) center of the box, or None when unset."""
            if self.box is None:
                logger.warning("按钮未初始化，无法点击。")
                return

            # Midpoint of the bounding box, floored to whole pixels.
            center_x = (self.box[0] + self.box[2]) // 2
            center_y = (self.box[1] + self.box[3]) // 2
            return center_x, center_y

    def __init__(self, names):
        """*names* is the YOLO mapping of class index -> class label."""
        self.names = names
        self.num_btn = Game.Button(0)
        self.icon_btn = Game.Button(1)
        self.left_btn = Game.Button(2)
        self.right_btn = Game.Button(3)
        self.target_icon_btn = Game.Button(4)
        self.ok_btn = Game.Button(5)
        self.help_btn = Game.Button(8)
        # self.change_btn = Game.Button(9)
        # self.nosee_btn = Game.Button(10)
        self.model = YOLO("best.pt")  # UI-element detector
        self.ocr_model = ddddocr.DdddOcr(show_ad=False)  # OCR model

    def btn_pos(self, cls_text, boxes):
        """Locate the detection labelled *cls_text* among YOLO *boxes*.

        Returns (True, center_x, center_y) when a matching detection exists,
        otherwise (False, 0, 0).
        """
        # Reverse-map the label back to its class index.
        cls_index = -1
        for key, value in self.names.items():
            if value == cls_text:
                cls_index = key
                break

        if cls_index in boxes.cls:
            n = list(boxes.cls).index(cls_index)
            xyxy = boxes.xyxy[n]
            # Round before int() so e.g. 9.7 maps to 10, not truncated to 9.
            x0, y0, x1, y1 = map(int, map(round, xyxy.cpu().numpy()))
            btn = Game.Button(cls_index, cls_text, (x0, y0, x1, y1))
            x, y = btn.pos()
            return True, x, y
        return False, 0, 0

    def auto_check(self, direction, steps, boxes):
        """Log the planned rotation; the actual clicking is not implemented here."""
        logger.info(f"自动验证方向: 向 '{direction}' -> '{steps}' 次.")

    def toggle_gua(self):
        """Toggle the AFK/auto state by pressing F1."""
        logger.info("F1切换挂机状态.")
        pyautogui.press("f1")

    def help(self):
        """Click the help button."""
        logger.info("执行 help 按钮点击.")
        self.help_btn.click()

    def change(self):
        """Click the change button (currently disabled)."""
        logger.info("执行 change 按钮点击.")
        # self.change_btn.click()

    def ok(self):
        """Click the OK/confirm button."""
        logger.info("执行 ok 按钮点击.")
        self.ok_btn.click()

    def icon(self, image):
        """Isolate the puzzle icon from *image*.

        Strips the background, binarizes, finds external contours, and returns
        the crop covering the union of all contour boxes — or None when no
        contour is found. Debug images are saved only at DEBUG log level.
        """
        logger.info("开始处理图标区域.")
        marked_image_path = './screenshot/icon-processed.png'

        # Remove the background with rembg.
        img_bg_removed = rembg.remove(image).convert('RGB')

        # Smooth colour regions before thresholding.
        # NOTE(review): the array is RGB-ordered, but cv2.COLOR_BGR2GRAY below
        # assumes BGR, so the channel weights are swapped — confirm whether
        # that affects the threshold results before "fixing" it.
        cv_image = cv2.pyrMeanShiftFiltering(np.array(img_bg_removed), sp=8, sr=60)

        # Grayscale + fixed binary threshold.
        gray_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        _, binary_image = cv2.threshold(gray_image, 40, 255, cv2.THRESH_BINARY)

        # Morphology: close to join fragments, then open to drop small specks.
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
        morph_image = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)
        morph_image = cv2.morphologyEx(morph_image, cv2.MORPH_OPEN, kernel)

        # Outer contours only.
        contours, _ = cv2.findContours(morph_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        result_boxes = []
        debug = logger.isEnabledFor(logging.DEBUG)  # hoisted; checked per save below

        logger.info("开始处理轮廓.")
        for index, contour in enumerate(contours):
            # Axis-aligned bounding rectangle of the contour.
            x, y, w, h = cv2.boundingRect(contour)
            color = [random.randint(0, 255) for _ in range(3)]  # random outline colour
            cv2.rectangle(cv_image, (x, y), (x + w, y + h), color, 1)

            icon_crop = img_bg_removed.crop((x, y, x + w, y + h)).convert('RGB')
            if debug:  # save per-contour crops only when debugging
                icon_crop.save(f"./screenshot/icon-{index + 1}.png")

            result_boxes.append((x, y, x + w, y + h))

        # Annotated copy for inspection.
        marked_image = Image.fromarray(cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB))
        if debug:
            marked_image.save(marked_image_path)

        if result_boxes:
            # Union of all contour boxes.
            x_min = min(box[0] for box in result_boxes)
            y_min = min(box[1] for box in result_boxes)
            x_max = max(box[2] for box in result_boxes)
            y_max = max(box[3] for box in result_boxes)

            final_icon = marked_image.crop((x_min, y_min, x_max, y_max)).convert('RGB')
            if debug:
                final_icon.save("./screenshot/icon_final.png")
            logger.info("图标处理完成，保存为 icon_final.png.")
            return final_icon  # processed icon image
        else:
            logger.warning("没有找到轮廓。")
            return None

    def ocr(self, image):
        """Run ddddocr on *image* and return the stripped recognized text."""
        logger.info("开始进行OCR识别.")
        result = self.ocr_model.classification(image).strip()
        logger.info("OCR识别结果: %s", result)
        return result

    @staticmethod
    def pixmap_to_pil_image(pixmap):
        """Convert a QPixmap to a PIL Image via an in-memory PNG round-trip."""
        qimage = pixmap.toImage()

        # Serialize the QImage into an in-memory buffer.
        byte_array = QByteArray()
        buffer = QBuffer(byte_array)
        buffer.open(QBuffer.ReadWrite)
        qimage.save(buffer, "PNG")  # PNG keeps the conversion lossless
        buffer.close()

        # Re-read the raw bytes as a PIL image.
        pil_image = Image.open(io.BytesIO(byte_array))

        return pil_image

    def analyze(self, num_img):
        """OCR *num_img* and report whether it contains a known angle string.

        Returns (coding, recognized_results): coding is True when any multiple
        of 15 from 15 to 165 appears in the OCR output.
        """
        logger.info("开始分析图像.")
        coding = False
        recognized_results = []

        if num_img:
            recognized_results = self.ocr_model.classification(num_img)
            logger.info("识别结果: %s", recognized_results)

            # Any of the expected rotation angles marks the image as "coding".
            if any(number in recognized_results for number in
                   ["15", "30", "45", "60", "75", "90", "105", "120", "135", "150", "165"]):
                coding = True

        logger.info("coding 状态: %s", coding)
        return coding, recognized_results

    def find(self, icon_image, target_image):
        """Rotate *icon_image* in 30-degree steps and template-match each
        rotation against *target_image*.

        Returns (direction, best_angle, steps, best_rotated_icon,
        best_match_value), where direction/steps describe the shortest way
        to reach best_angle.
        """
        logger.info("开始寻找最佳匹配角度.")

        # Grayscale + blur both images to make matching less noise-sensitive.
        icon_gray = cv2.cvtColor(np.array(icon_image), cv2.COLOR_BGR2GRAY)
        icon_blurred = cv2.GaussianBlur(icon_gray, (5, 5), 0)

        target_gray = cv2.cvtColor(np.array(target_image), cv2.COLOR_BGR2GRAY)
        target_blurred = cv2.GaussianBlur(target_gray, (5, 5), 0)

        rotation_step = 30  # the game rotates in fixed 30-degree increments
        best_angle = 0
        best_match_value = 0
        best_rotated_icon = None

        (h, w) = icon_blurred.shape[:2]
        center = (w // 2, h // 2)  # pivot is loop-invariant; hoisted out

        for angle in range(0, 360, rotation_step):
            # Rotate the icon by the candidate angle.
            M = cv2.getRotationMatrix2D(center, angle, 1.0)
            rotated_icon = cv2.warpAffine(icon_blurred, M, (w, h))

            # Score this rotation against the target.
            match_value = self.match_images(rotated_icon, target_blurred)

            # Keep the best-scoring rotation seen so far.
            if match_value > best_match_value:
                best_match_value = match_value
                best_angle = angle
                best_rotated_icon = rotated_icon

        logger.info("最佳匹配角度: %d, 匹配度: %f", best_angle, best_match_value)

        # Shortest rotation path: up to 180° go right, otherwise go left.
        if best_angle <= 180:
            direction = "右"
            steps = best_angle // rotation_step
        else:
            direction = "左"
            steps = (360 - best_angle) // rotation_step

        return direction, best_angle, steps, best_rotated_icon, best_match_value

    def match_images(self, img1, img2):
        """Return the best normalized-correlation score of *img1* within *img2*."""
        result = cv2.matchTemplate(img2, img1, cv2.TM_CCOEFF_NORMED)
        (_, max_val, _, _) = cv2.minMaxLoc(result)
        return max_val

    def notify(self, option=""):
        """Play the audio cue for *option* (if any), then beep an alert pattern."""
        sound_dir = "sound/"
        # BUG FIX: the original if/elif chain had two `option == "autocheck_fail"`
        # branches, the first of which played autocheck_success.wav — so the
        # success cue was unreachable. Each option now maps to its own file.
        sounds = {
            "checking": "checking.wav",
            "exit": "exit.wav",
            "ka": "ka.wav",
            "autocheck_begin": "autocheck_begin.wav",
            "autocheck_success": "autocheck_success.wav",
            "autocheck_fail": "autocheck_fail.wav",
        }
        if option in sounds:
            winsound.PlaySound(sound_dir + sounds[option], winsound.SND_FILENAME | winsound.SND_ASYNC)

        # Audible alert: three rising two-tone beeps.
        for _ in range(3):
            winsound.Beep(1600, 100)
            time.sleep(0.2)
            winsound.Beep(2600, 300)
            time.sleep(1.0)


def get_time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    # strftime formats the current local time when no time tuple is given.
    return time.strftime("%Y-%m-%d %H:%M:%S")
