import json
import time

import cv2
import numpy as np
import pyautogui
import pytesseract

# Disable the PyAutoGUI fail-safe: the drags below deliberately hit screen
# corners, which would otherwise abort the script with FailSafeException.
pyautogui.FAILSAFE = False

# Cache the screen centre once; every drag helper starts its gesture here.
_screen_width, _screen_height = pyautogui.size()
screen_center_x = _screen_width // 2
screen_center_y = _screen_height // 2

# Short grace period before the automation starts touching the mouse.
time.sleep(1)


def drag_to_top_left():
    """Drag from the screen centre to the top-left corner, then pause briefly."""
    origin = (screen_center_x, screen_center_y)
    # Start the gesture at the centre of the screen.
    pyautogui.moveTo(*origin)
    # Drag the pointer all the way to the top-left corner.
    pyautogui.dragTo(0, 0, duration=0.5)
    # Let the UI settle after the drag.
    time.sleep(0.5)

# Reset the viewport: dragging towards the top-left corner four times in a
# row pushes the view to its starting position before the scan begins.
for _ in range(4):
    drag_to_top_left()


def drag_to_right():
    """Drag from the screen centre to the right edge, then pause."""
    # Begin at the centre of the screen.
    pyautogui.moveTo(screen_center_x, screen_center_y)
    # Drag horizontally to the right-hand edge (x = full screen width).
    screen_width = pyautogui.size()[0]
    pyautogui.dragTo(screen_width, screen_center_y, duration=0.5)
    # Let the view finish scrolling.
    time.sleep(1)


def drag_to_up():
    """Drag from the screen centre straight up to the top edge, then pause."""
    centre = (screen_center_x, screen_center_y)
    # Begin at the centre of the screen.
    pyautogui.moveTo(*centre)
    # Drag vertically to the top edge (y = 0).
    pyautogui.dragTo(screen_center_x, 0, duration=0.5)
    # Let the view finish scrolling.
    time.sleep(1)


def drag_to_down():
    """Drag from the screen centre straight down to the bottom edge, then pause."""
    # Begin at the centre of the screen.
    pyautogui.moveTo(screen_center_x, screen_center_y)
    # Drag vertically to the bottom edge (y = full screen height).
    screen_bottom = pyautogui.size()[1]
    pyautogui.dragTo(screen_center_x, screen_bottom, duration=0.5)
    # Let the view finish scrolling.
    time.sleep(1)


def drag_to_left():
    """Drag from the screen centre to the left edge, then pause."""
    centre = (screen_center_x, screen_center_y)
    # Begin at the centre of the screen.
    pyautogui.moveTo(*centre)
    # Drag horizontally to the left-hand edge (x = 0).
    pyautogui.dragTo(0, screen_center_y, duration=0.5)
    # Let the view finish scrolling.
    time.sleep(1)


def find_and_click_target(target_image, threshold=0.5):
    """Locate *target_image* on the current screen and OCR the label below it.

    Takes a fresh screenshot, template-matches ``target_image`` against it
    and, when the best match reaches ``threshold``, crops a small strip just
    below the match, upscales it 2x, and runs Tesseract OCR (simplified
    Chinese) on it.

    NOTE(review): despite the name, this function never clicks anything —
    confirm whether a click was intended before relying on the name.

    Args:
        target_image: BGR template image (as returned by ``cv2.imread``).
        threshold: minimum normalized match score to accept (default 0.5,
            the original hard-coded value).

    Returns:
        The OCR-extracted text (str) when a match was found, else ``None``.
    """
    # Grab the current screen and convert PIL's RGB to OpenCV's BGR.
    screenshot = pyautogui.screenshot()
    screenshot = cv2.cvtColor(np.array(screenshot), cv2.COLOR_RGB2BGR)

    # Search for the template in the screenshot.
    result = cv2.matchTemplate(screenshot, target_image, cv2.TM_CCOEFF_NORMED)
    _min_val, max_val, _min_loc, max_loc = cv2.minMaxLoc(result)
    if max_val < threshold:
        return None

    # Centre of the matched template in screenshot coordinates.
    target_height, target_width = target_image.shape[:2]
    target_x = max_loc[0] + target_width // 2
    target_y = max_loc[1] + target_height // 2

    # Crop a ~200x35 px strip just below the matched icon.  Clamp the left
    # edge at 0: the original `target_x - 100` could go negative, and NumPy
    # would silently wrap the slice to the far side of the screenshot.
    left = max(target_x - 100, 0)
    screenshot_below = screenshot[target_y + 45:target_y + 80, left:target_x + 100]
    if screenshot_below.size == 0:
        # Match too close to the bottom edge of the screen; nothing to OCR
        # (cv2.resize would raise on an empty image).
        return None

    # Upscale 2x and blur slightly to help Tesseract on small UI text.
    screenshot_below = cv2.resize(screenshot_below, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
    blurred = cv2.GaussianBlur(screenshot_below, (5, 5), 0)
    gray = cv2.cvtColor(blurred, cv2.COLOR_BGR2GRAY)

    # Run OCR on the cleaned-up crop.
    extracted_text = pytesseract.image_to_string(gray, lang='chi_sim')
    print("Extracted Text:", extracted_text)
    return extracted_text


# --- Main script -----------------------------------------------------------

with open('path.json') as file:
    data = json.load(file)
# NOTE(review): `value` is loaded but never used below — presumably it was
# meant to drive the traversal path; confirm before removing.
value = data.get("3")

target_image = cv2.imread('icon/zhandian.png')
if target_image is None:
    # cv2.imread returns None instead of raising on a missing/unreadable
    # file; fail fast here rather than crashing later inside matchTemplate.
    raise FileNotFoundError("could not load template image 'icon/zhandian.png'")

# Snake-scan the map: sweep right across a row, drop down one row, sweep
# back left, and so on.  Each entry is the drag performed AFTER scanning
# the current screen — the order exactly reproduces the original sequence.
_moves = (
    drag_to_right, drag_to_right, drag_to_right, drag_to_down,
    drag_to_left, drag_to_left, drag_to_left, drag_to_down,
    drag_to_right, drag_to_right, drag_to_right, drag_to_down,
    drag_to_left, drag_to_left, drag_to_left, drag_to_down,
)
for _move in _moves:
    find_and_click_target(target_image)
    _move()
