import json
import logging
import os
import random

import re
import time

import cv2
from PIL import Image
import pytesseract
import matplotlib.pyplot as plt
import numpy as np

from src.mouse_key import *
from src.configs import IMAGE_PATH, TEMP_IMAGE_PATH

logging.basicConfig(level=logging.DEBUG)

# Game window resolution and the derived screen-center point.
resolution = (1280, 960)
center_x = int(resolution[0] / 2)
center_y = int(resolution[1] / 2)
# Screen y-coordinate of the character's feet — presumably tied to this
# resolution; TODO confirm against the actual window layout.
person_bottom = 644


class CurrentMapPosition:
    """OCR-based reader of the player's current map coordinates.

    Screenshots two small screen regions (one holding the X digits, one the
    Y digits), binarizes and upscales them, then runs Tesseract with a
    digits-only whitelist to recover the numeric position.
    """

    def __init__(self, X=None, Y=None, image_path=None, upscale=4):
        """
        :param X: (left, top, right, bottom) screen box containing the X digits.
        :param Y: (left, top, right, bottom) screen box containing the Y digits.
        :param image_path: directory where intermediate screenshots are written.
        :param upscale: integer enlargement factor applied before OCR.
        """
        # Desktop machine layout
        self.X = (585, 31, 610, 45) if X is None else X
        self.Y = (633, 31, 658, 45) if Y is None else Y
        # Laptop layout (alternate offsets)
        # self.X = (585, 48, 610, 64) if X is None else X
        # self.Y = (633, 48, 658, 64) if Y is None else Y
        self.image_path = image_path
        self.upscale = upscale

    def screenshot_map_coor(self, postfix=""):
        """Capture, binarize and upscale the X/Y coordinate boxes.

        :param postfix: suffix appended to the saved file names (used for
            building OCR datasets).
        :return: the two processed images, X first then Y.
        """
        image_path_x = os.path.join(self.image_path, f"coor_x{postfix}.png")
        image_path_y = os.path.join(self.image_path, f"coor_y{postfix}.png")
        screenshot_coor(self.X, image_path=image_path_x, point_and_save=False, expand=3)
        screenshot_coor(self.Y, image_path=image_path_y, point_and_save=False, expand=3)
        # Binarize
        image_x = self.to_black_background(image_path_x)
        image_y = self.to_black_background(image_path_y)
        # Upscale
        image_x = self.upscale_image(image_x, self.upscale, image_path_x)
        image_y = self.upscale_image(image_y, self.upscale, image_path_y)
        return image_x, image_y

    def cut_noise(self, image, span=2):
        """Whiten edge noise on a 2-D single-channel image, in place.

        If any black (0) pixel lies within `span` columns of the right edge,
        everything from the nearest fully-white column rightward is set to
        255; symmetrically for the left edge. Presumably this strips partial
        glyphs clipped by the screenshot box — intent inferred, verify.

        :param image: 2-D array where 0 marks "ink" and 255 marks background.
        :return: the (mutated) image.
        """
        height, width = image.shape
        black_mat = 1 * (image == 0)
        if np.sum(black_mat[:, width - span:]) > 0:
            for i in reversed(range(0, width - span)):
                if np.sum(black_mat[:, i]) == 0:
                    image[:, i:] = 255
                    break
        if np.sum(black_mat[:, :span]) > 0:
            for i in range(span, width):
                if np.sum(black_mat[:, i]) == 0:
                    image[:, :i] = 255
                    break
        return image

    def to_black_background(self, image_path):
        """Binarize the image at `image_path` in place and return it re-read.

        Pixels equal to pure white (255) become 0 and everything else 255,
        i.e. a white background turns black and any non-white text turns
        white; edge noise is then trimmed via cut_noise.
        """
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = 255 * (image != 255)
        image = self.cut_noise(image)
        cv2.imwrite(image_path, image)
        image = cv2.imread(image_path)
        return image

    def upscale_image(self, image, scale, image_path):
        """Enlarge `image` by `scale`, re-threshold, save and return it re-read.

        Visually, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4 and cv2.INTER_BITS2 all
        give good results, better than bilinear cv2.INTER_LINEAR — but
        bilinear is faster, so consider INTER_LINEAR when shrinking images.
        """
        height, width = image.shape[:2]
        new_height = int(height * scale)
        new_width = int(width * scale)
        new_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_BITS2)
        # Re-binarize: interpolation introduces gray values.
        new_image = 255 * (new_image > 220)
        cv2.imwrite(image_path, new_image)
        new_image = cv2.imread(image_path)
        return new_image

    def get(self):
        """OCR the current map position and return it as an (x, y) int tuple.

        :raises ValueError: if Tesseract yields no digits for either box
            (the int() conversion fails on an empty string).
        """
        # 1. screenshot
        image_x, image_y = self.screenshot_map_coor()

        x = pytesseract.image_to_string(Image.fromarray(image_x),
                                        config='--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789')
        y = pytesseract.image_to_string(Image.fromarray(image_y),
                                        config='--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789')
        # r"\D" — raw string; the original "\D" was an invalid escape sequence.
        x = int(re.sub(r"\D", "", x))
        y = int(re.sub(r"\D", "", y))
        logging.info(f"X：{x}, Y:{y}")
        return (x, y)


def check_in_range(point, target_point, criterion=6):
    """Return True when `point` lies within `criterion` Euclidean distance
    of `target_point`.

    :param point: (x, y) current position.
    :param target_point: (x, y) target position.
    :param criterion: inclusive distance threshold.
    """
    distance = get_euclidean_distance(point, target_point)
    print("distance:", distance)
    # Return the comparison directly instead of the if/else True/False chain.
    return distance <= criterion


def change_skill():
    """Endlessly cycle through the skill hotkeys, pressing one per loop pass.

    Clicks a fixed screen point first to focus the game window, then presses
    f1, f2, f3 in rotation forever. Never returns.
    """
    time.sleep(3)
    pdg.moveTo(300, 300)
    pdg.click()
    hotkeys = ["f1", "f2", "f3"]  # , "f4"
    total = len(hotkeys)
    idx = 0
    while True:
        current = hotkeys[idx]
        pdg.press(current)
        print(f"当前Key:{current}")
        # Wrap around with modulo arithmetic.
        idx = (idx + 1) % total


class SearchCoordinates:
    """Walks the character along a list of map waypoints, casting a skill at
    each one, forever."""

    def __init__(self, coors=None, search_limit=15, criterion=6):
        """
        :param coors: list of (x, y) map waypoints to patrol (default: empty).
        :param search_limit: max movement iterations per waypoint before
            moving on.
        :param criterion: distance threshold for counting a waypoint reached.
        """
        # Avoid the mutable-default-argument pitfall (`coors=[]` was shared
        # across all instances); build a fresh list per instance instead.
        self.coors = [] if coors is None else coors
        self.search_limit = search_limit
        self.position_detector = CurrentMapPosition(image_path=TEMP_IMAGE_PATH)
        self.skill = ""
        self.track = []  # history of OCR'd positions
        self.criterion = criterion

    def get_nearest_target_idx(self, current_position):
        """Return the index of the waypoint closest to `current_position`."""
        distances = [get_euclidean_distance(current_position, target) for target in self.coors]
        return np.argmin(distances)

    def random_click(self, fix_count=0):
        """Click a random screen point to unstick the character.

        With fix_count == 0 the click is unconditional; with fix_count > 0
        it only fires when the last `fix_count` tracked positions are all
        identical (the character has stopped moving).

        :return: True if a random click was performed, else False.
        """
        apply_random = False
        if fix_count == 0:
            apply_random = True
        elif fix_count > 0:
            recent_movements = self.track[-fix_count:]
            if len(set(recent_movements)) == 1:
                apply_random = True

        if apply_random:
            pdg.moveTo(random.randint(200, 800), random.randint(200, 800))
            click()
            return True
        return False

    def search(self, target_point):
        """Rotate and walk toward `target_point` until within range or
        self.search_limit iterations have elapsed."""
        count = 0

        while True:
            previous = self.track[-1]
            time.sleep(2)
            if check_in_range(previous, target_point, criterion=self.criterion):
                break
            else:
                current = self.position_detector.get()
                self.track.append(current)
                random_movement = self.random_click(fix_count=4)
                time.sleep(2)
                if not random_movement:
                    line1 = [current, target_point]
                    line2 = [previous, current]
                    # `turns` renamed from `round`, which shadowed the builtin.
                    turns = get_rotation_round(line1, line2)
                    rotated = False
                    if turns is not None:
                        direction = get_rotation_direction(previous, current, target_point)
                        print("当前坐标：", current)
                        print("目标坐标：", target_point)
                        print(f"转动次数：{turns},转动方向：{direction}")
                        if direction is not None:
                            rotate(turns, direction=direction)
                            rotated = True
                    move_forward(step_limit=1, interval=1, margin=-55)
                    if not rotated:
                        print("未发生转动")
                else:
                    print("进行了随机移动")
            time.sleep(3)
            count += 1
            if count >= self.search_limit:
                break

    def change_skill(self):
        """Pick a random skill key (f1 or f2, 50/50), press it, then
        shift-click the fixed cast point."""
        if random.random() <= 0.5:
            skill = "f1"
        else:
            skill = "f2"
        logging.info(f"释放技能：{skill}")
        pdg.press(skill)
        pdg.keyDown("shift")
        # Laptop 1280*960:
        # point_screen(633, 608)
        # Desktop 1000*768:
        point_screen(653, 575)
        click()
        pdg.keyUp("shift")

    def run(self):
        """Main loop: locate the character via OCR (retrying with random
        clicks on failure), then patrol the waypoints forever, reversing
        the route each time the last waypoint is reached."""
        point_screen()
        while True:
            try:
                current = self.position_detector.get()
                break
            except Exception as e:
                print("错误：", e)
                self.random_click()

        self.track.append(current)
        target_idx = self.get_nearest_target_idx(current)
        total_idx_count = len(self.coors)
        print("coor_idx:", target_idx)

        while True:
            target_point = self.coors[target_idx]
            print("target_point：", target_point)
            try:
                self.search(target_point)
                self.change_skill()
                if target_idx < total_idx_count - 1:
                    target_idx += 1
                else:
                    # Route finished: restart from index 0 over the reversed
                    # route so the character walks back the way it came.
                    target_idx = 0
                    self.coors = list(reversed(self.coors))
            except Exception as e:
                print("错误：", e)
                count_down(5)

# coor = get_blank_coor(int(row["X"]), int(row["Y"]), width, border)
# def make_ocr_data():
#     from tqdm import tqdm
#     position_detector = CurrentMapPosition(image_path=TEMP_IMAGE_PATH + "/ocr")
#     # previous = position_detector.get()
#     for i in tqdm(range(100)):
#         # move_forward(step_limit=1)
#         position_detector.screenshot_map_coor(postfix=f"_{i}")
#         time.sleep(2)


# def test_all():
#     from tqdm import tqdm
#     image_path_unr = r"F:\DataCenter\projects\happy_nage\temp_files\images\ocr\unrecognized"
#     results = {}
#     for root, _, files in os.walk(image_path_unr):
#         files = sorted(files)
#         length = len(files)
#         for i in tqdm(range(length)):
#             file = files[i]
#             image_path = os.path.join(root, file)
#             image = cv2.imread(image_path)
#             result = pytesseract.image_to_string(Image.fromarray(image),
#                                              config='--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789')
#             results[file] = result
#     with open(image_path_unr + '/r.json', "w", encoding="utf-8") as fp:
#         json.dump(results, fp, ensure_ascii=False)


def main():
    """Entry point: patrol the configured waypoint route forever.

    Removed the dead first `coors = [...]` assignment whose contents were
    entirely commented out and which was immediately overwritten below.
    """
    # Patrol waypoints (map coordinates); edit per farming route.
    coors = [
        (354, 357),
        (358, 349),
        (368, 342),
        (380, 336),
        (389, 339),
        (393, 330),
        (380, 341),
        (367, 349),
    ]
    searcher = SearchCoordinates(coors)
    searcher.run()


# Script entry point; the commented block below it is ad-hoc OCR debugging code.
if __name__ == "__main__":
    main()

    # # make_ocr_data()
    # import numpy as np
    #
    # image_path = r"F:\DataCenter\projects\happy_nage\temp_files\images\ocr\apply"
    # image_path_unr = r"F:\DataCenter\projects\happy_nage\temp_files\images\ocr\unrecognized"
    # # revised_image_path = r"F:\DataCenter\projects\happy_nage\temp_files\images\ocr\manual"
    # image_file = os.path.join(image_path, "apply (65).png")
    # image_file2 = os.path.join(image_path_unr, "apply (76).png")
    # image = cv2.imread(image_file2)
    # r1 = pytesseract.image_to_string(Image.fromarray(image),
    #                                 config='--psm 10 --oem 3 -c tessedit_char_whitelist=0123456789')
    # r2 = pytesseract.image_to_string(Image.fromarray(image), lang="eng")
    # print("r1:", r1)
    # print("r2:", r2)
    # test_all()
