import pyautogui
from utils.config import config
from utils.log import log
import time
from utils.WHDUtils import  WHDUtils
from utils.OCRUtils import OCRUtils
import win32api
import cv2 as cv
import numpy as np
import math
import random
from PIL import Image

def get_mapping(x):
    """Return the remapped key for ``x`` when a mapping is configured, else ``x``."""
    if x not in config.origin_key:
        return x
    # Replace the original key with the usable one at the same index.
    return config.mapping[config.origin_key.index(x)]

def keyDown(x):
    """Press (and hold) the remapped equivalent of key ``x``."""
    mapped = get_mapping(x)
    pyautogui.keyDown(mapped)
def keyUp(x):
    """Release the remapped equivalent of key ``x``."""
    mapped = get_mapping(x)
    pyautogui.keyUp(mapped)

'''键鼠操作'''
class KeyOpsUtils:
    """Keyboard and mouse operation helpers bound to the game window.

    Coordinates given as floats are treated as relative positions in the
    window (scaled by ``whd.xx`` / ``whd.yy``); ints are absolute pixels.
    Set ``_stop`` to True to make pending input operations abort with
    ``ValueError``.
    """

    def __init__(self):
        self.whd = WHDUtils()  # window handle / geometry helper
        self.ocr = OCRUtils()  # OCR engine
        self._stop = False     # abort flag checked before injecting input

    def press(self, c, t=0):
        """Press key ``c``, hold it for ``t`` seconds, then release it.

        Keys '3' and 'r' are pressed very frequently, so they are not
        logged. Raises ValueError when a shutdown was requested.
        """
        if c not in "3r":
            log.debug(f"按下按钮 {c}，等待 {t} 秒后释放")
        if self._stop:
            raise ValueError("正在退出")
        keyDown(c)
        time.sleep(t)
        keyUp(c)

    def calc_point(self, point, offset):
        """Return ``point`` shifted by ``offset`` scaled into relative units.

        NOTE(review): due to operator precedence this computes
        ``point - (offset / scale)``, not ``(point - offset) / scale`` —
        confirm with callers that this is the intended formula.
        """
        return (point[0] - offset[0] / self.whd.xx, point[1] - offset[1] / self.whd.yy)

    def click(self, points, click=True):
        """Move the cursor to ``points`` and optionally left-click.

        ``points`` may be relative floats (converted via window geometry)
        or absolute pixel ints. Raises ValueError when a shutdown was
        requested.
        """
        x, y = points
        # Relative (float) coordinates: convert to absolute screen pixels.
        if type(x) is not int:
            x, y = self.whd.x1 - int(x * self.whd.xx), self.whd.y1 - int(y * self.whd.yy)
        # Full-screen mode introduces a fixed 9px offset.
        if self.whd.is_full:
            x += 9
            y += 9
        if self._stop:
            raise ValueError("正在退出")
        win32api.SetCursorPos((x, y))
        if click:
            pyautogui.click()
        time.sleep(0.3)

    def drag(self, pt1, pt2):
        """Drag the mouse from relative point ``pt1`` to relative point ``pt2``."""
        x1, y1 = pt1
        x1, y1 = self.whd.x1 - int(x1 * self.whd.xx), self.whd.y1 - int(y1 * self.whd.yy)
        x2, y2 = pt2
        x2, y2 = self.whd.x1 - int(x2 * self.whd.xx), self.whd.y1 - int(y2 * self.whd.yy)
        # Full-screen mode introduces a fixed 9px offset.
        if self.whd.is_full:
            x1 += 9
            y1 += 9
            x2 += 9
            y2 += 9
        win32api.SetCursorPos((x1, y1))
        time.sleep(0.2)
        pyautogui.drag(x2 - x1, y2 - y1, 0.4)
        time.sleep(0.3)

    def click_text(self, text, num=1, frq=0.0, sim=True, env=None, click=True):
        """Find ``text`` on screen via OCR and click its center ``num`` times.

        Pause between clicks:
          - ``frq == 0.0``: normal mode, pause grows with sqrt of click index;
          - ``frq > 0`` and ``sim``: simulated mode, random ``frq * [1..10]``;
          - ``frq > 0`` and not ``sim``: fixed interval of ``frq`` seconds.
        Returns True when the text was found, False otherwise.
        """
        img = self.whd.get_cur_whd_screenshort()  # current window screenshot
        pt = self.ocr.find_text(np.asarray(img), text, env=env)  # corner points of the text box
        if pt is None:
            return False
        if click:
            for i in range(num):
                # Click the center of the detected text box (relative coords).
                self.click(
                    (
                        1 - (pt[0][0] + pt[1][0]) / 2 / self.whd.xx,
                        1 - (pt[0][1] + pt[2][1]) / 2 / self.whd.yy,
                    )
                )
                if frq == 0.0:
                    # Normal mode: slowly growing pause.
                    time.sleep(0.01 + 0.01 * math.sqrt(i))
                elif sim:
                    # Simulated mode: randomized interval.
                    time.sleep(frq * random.randint(1, 10))
                else:
                    # Fixed-frequency mode.
                    time.sleep(frq)
        return True

    def scan_screenshot(self, prepared):
        """Template-match ``prepared`` against a fresh window screenshot.

        Returns a dict with the (channel-swapped) screenshot and the
        ``cv.minMaxLoc`` results of the normalized match.
        """
        temp = self.whd.get_cur_whd_screenshort()
        screenshot = np.array(temp)
        # Swap channels so the screenshot matches cv.imread's BGR templates
        # (presumably the screenshot arrives as RGB — confirm with WHDUtils).
        screenshot = cv.cvtColor(screenshot, cv.COLOR_BGR2RGB)
        result = cv.matchTemplate(screenshot, prepared, cv.TM_CCOEFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(result)
        return {
            "screenshot": screenshot,
            "min_val": min_val,
            "max_val": max_val,
            "min_loc": min_loc,
            "max_loc": max_loc,
        }

    def calculated(self, result, shape):
        """Convert a template-match location to relative center coordinates."""
        # max_loc is the (x, y) pixel position of the template's top-left corner.
        match_x, match_y = result["max_loc"]
        prepared_height, prepared_width, _channels = shape
        x = 1 - (match_x + prepared_width / 2) / self.whd.xx
        y = 1 - (match_y + prepared_height / 2) / self.whd.yy
        return x, y

    def click_target(self, target_path, threshold=0.6, flag=True):
        """Click the on-screen point matching the template at ``target_path``.

        With ``flag=True``, keeps re-scanning until the match score exceeds
        ``threshold``; with ``flag=False``, gives up after a single scan.
        """
        target = cv.imread(target_path)
        while True:
            result = self.scan_screenshot(target)
            if result["max_val"] > threshold:
                points = self.calculated(result, target.shape)
                self.click(points)
                return
            if not flag:
                return
