import time

import cv2
import win32gui
import re
from screenshot import  screenshot
#https://github.com/RapidAI/RapidOCR
from rapidocr_onnxruntime import RapidOCR
import numpy as np
# Module-level OCR engine, created once and reused on every loop iteration.
engine = RapidOCR()
def is_digit(s):
    """Return True if ``s`` is an optionally negative integer or decimal string."""
    match = re.match(r'^-?\d+(\.\d+)?$', s)
    return match is not None
def keep_digits_and_minus(s):
    """Strip every character that is not an ASCII digit (0-9) or a minus sign."""
    allowed = "0123456789-"
    return "".join(ch for ch in s if ch in allowed)
def split_string(ss):
    """Extract signed-integer tokens from an OCR result string.

    Walks ``ss`` left to right, accumulating characters into a pending token;
    whenever a non-digit character is seen the pending token is reduced to
    digits/minus via ``keep_digits_and_minus`` and flushed. Tokens that are
    empty or a bare ``"-"`` are discarded — the caller feeds every token to
    ``int()``, which cannot parse a lone minus sign.

    Example: ``split_string("x12y-34z")`` -> ``["12", "-34"]``.
    """
    token = ""
    tokens = []
    for ch in ss:
        if not ch.isdigit():
            # Flush the pending token before starting a new one.
            token = keep_digits_and_minus(token)
            # Bug fix: also reject a bare "-" here, matching the final flush
            # below; previously input such as "-a5" emitted a "-" token that
            # crashed int() in the main loop.
            if token != "" and token != "-":
                tokens.append(token)
            token = ""
        token += ch
    token = keep_digits_and_minus(token)
    if token != "" and token != "-":
        tokens.append(token)
    return tokens
def expand_image(img, expand_size, grayscale=False,contrast_factor=1.0,saturation_factor=1.0,brightness_factor=1.0,adaptive_histogram=False, clip_limit=2.0, grid_size=(80, 20)):
    """Pad ``img`` with ``expand_size`` pixels of edge-replicated border on all
    four sides, then apply optional contrast / saturation / brightness / CLAHE /
    grayscale adjustments to make the text easier for the OCR engine.

    Args:
        img: H x W x 3 or H x W x 4 uint8 array; an alpha channel is dropped.
        expand_size: border width in pixels added on each side.
        grayscale: if True, output is grayscale replicated back to 3 channels.
        contrast_factor: scales each pixel's deviation from the global mean.
        saturation_factor: scales the HSV saturation channel.
        brightness_factor: multiplicative brightness via ``convertScaleAbs``.
        adaptive_histogram: if True, apply CLAHE to the L channel in LAB space.
        clip_limit, grid_size: CLAHE parameters.

    Returns:
        (H + 2*expand_size) x (W + 2*expand_size) x 3 uint8 image.

    NOTE(review): the image is converted RGB->BGR once below, yet the later
    saturation step still uses RGB2HSV/HSV2RGB flags, so the channel order is
    inconsistent on paper. The tuning values in the caller appear to have been
    chosen against this exact pipeline, so it is documented, not changed.
    """
    if img.shape[2] == 4:
        img = img[:, :, :3]  # Keep only RGB channels
    # Get image dimensions
    height, width = img.shape[:2]

    # Create a new array for the expanded image
    expanded_img = np.zeros((height + 2 * expand_size, width + 2 * expand_size, 3), dtype=np.uint8)

    # Copy the original image to the center of the expanded image
    expanded_img[expand_size:expand_size + height, expand_size:expand_size + width] = img

    # Fill the expanded borders with colors close to the original image edges
    # Top border: replicate the first row of the original image
    expanded_img[:expand_size, expand_size:expand_size + width] = img[0]
    # Bottom border: replicate the last row
    expanded_img[expand_size + height:, expand_size:expand_size + width] = img[-1]
    # Left border: replicate the first filled column (includes the corners)
    expanded_img[:, :expand_size] = expanded_img[:, expand_size].reshape(-1, 1, 3)
    # Right border: replicate the last filled column
    expanded_img[:, expand_size + width:] = expanded_img[:, expand_size + width - 1].reshape(-1, 1, 3)
    expanded_img = cv2.cvtColor(expanded_img, cv2.COLOR_RGB2BGR)

    # Contrast: scale deviation from the global mean intensity
    if contrast_factor != 1.0:
        mean_intensity = np.mean(expanded_img)
        adjusted_img = (expanded_img - mean_intensity) * contrast_factor + mean_intensity
        expanded_img = np.clip(adjusted_img, 0, 255).astype(np.uint8)
    # Saturation (NOTE(review): buffer is BGR at this point but the flag says
    # RGB2HSV, so hue is computed on swapped channels — see docstring)
    if saturation_factor != 1.0:
        hsv_img = cv2.cvtColor(expanded_img, cv2.COLOR_RGB2HSV)
        hsv_img[:, :, 1] = np.clip(hsv_img[:, :, 1] * saturation_factor, 0, 255).astype(np.uint8)
        expanded_img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB)

    # Brightness (multiplicative scaling, saturating at 255)
    if brightness_factor != 1.0:
        expanded_img = cv2.convertScaleAbs(expanded_img, alpha=brightness_factor)

    # Adaptive histogram equalization (CLAHE) on the lightness channel
    if adaptive_histogram:
        lab_img = cv2.cvtColor(expanded_img, cv2.COLOR_BGR2LAB)
        l_channel, a_channel, b_channel = cv2.split(lab_img)
        clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=grid_size)
        l_channel = clahe.apply(l_channel)
        lab_img = cv2.merge((l_channel, a_channel, b_channel))
        expanded_img = cv2.cvtColor(lab_img, cv2.COLOR_LAB2BGR)
    if grayscale:
        expanded_img = cv2.cvtColor(expanded_img, cv2.COLOR_RGB2GRAY)
        expanded_img = np.stack([expanded_img, expanded_img, expanded_img], axis=-1)  # Convert back to 3 channels

    return expanded_img
# Get the handle of the game window ("鸣潮" / Wuthering Waves, Unreal Engine).
hwnd = win32gui.FindWindow("UnrealWindow", "鸣潮  ")  # replace with your actual window title/class
# Top-left (x, y) and bottom-right (x, y) of the capture region — the on-screen
# coordinate readout in the bottom-left corner of a 1080p window.
left, top, right, bottom = 18,1055,221,1080  # replace with your actual region coordinates
# NOTE(review): the original comment here said "check whether the directory
# exists, create it if not" — that appears stale, and is_f_list is unused in
# the visible code.
is_f_list=[False,False,False]
last_coords =None
jici_chao=[0,0,0]
cv2.namedWindow("1", cv2.WINDOW_NORMAL)
# Keep the OpenCV preview window always on top.
cv2.setWindowProperty("1", cv2.WND_PROP_TOPMOST, 1)
while True:

    # Capture the coordinate-readout region of the game window.
    IMG = screenshot(hwnd, left, top, right, bottom, filename=None, is_top=False)
    # Tuning notes from the original author:
    #   contrast_factor=1.4, adaptive_histogram=True, clip_limit=2, grid_size=(10, 10) works better on grassland
    #   contrast_factor=1.4, adaptive_histogram=True, clip_limit=2, grid_size=(20, 20) works better on snow mountains
    IMG = expand_image(IMG, 40, contrast_factor=1.4, adaptive_histogram=True, clip_limit=2, grid_size=(20, 20), brightness_factor=0.9)
    result, elapse = engine(IMG)
    if result is not None:  # fixed: identity comparison with None, not !=
        sim = result[0][-1]  # confidence score of the first recognized line
        if sim > 0.8:
            txt = result[0][-2]  # recognized text of the first line
            txt_list = split_string(txt)
            if len(txt_list) == 3:
                # Robustness fix: split_string can emit a token (e.g. a bare
                # "-") that int() rejects; skip this frame instead of crashing.
                try:
                    current_coords = [int(v) for v in txt_list]
                except ValueError:
                    time.sleep(1)
                    continue
                if last_coords is None:
                    last_coords = current_coords.copy()
                # Per-axis outlier rejection (deduplicated from three copies):
                # a jump of more than 20 units is treated as an OCR misread and
                # replaced by the last good value, unless it has now occurred
                # more than 3 times, in which case it is accepted as a real
                # move. As in the original code, the strike counter is NOT
                # reset by a normal reading.
                for axis in range(3):
                    if abs(current_coords[axis] - last_coords[axis]) > 20:
                        jici_chao[axis] += 1
                        if jici_chao[axis] > 3:
                            jici_chao[axis] = 0
                            last_coords[axis] = current_coords[axis]
                        else:
                            current_coords[axis] = last_coords[axis]
                    else:
                        last_coords[axis] = current_coords[axis]

                # Overlay the accepted coordinates on the preview window.
                IMG = cv2.putText(IMG, str(current_coords), (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.imshow("1", IMG)
                cv2.waitKey(1)  # pump the HighGUI event loop
                print(current_coords, sim)
            time.sleep(1)
    else:
        # Nothing recognized: poll faster and reset the smoothing state.
        time.sleep(0.2)
        last_coords = None
        jici_chao = [0, 0, 0]
