import YOLOv8DetectorUtil
from PIL import Image
import time
import asyncio
import subprocess

import win32gui, win32ui, win32con
import numpy as np

import cv2
import joblib
import requests
import re
import math
import gc
from vnc import VNC
import keyboard
import random

# 1. Initialize the YOLOv8 ONNX detector (thresholds tuned for this game's model).
detector = YOLOv8DetectorUtil.YOLOv8Detector('./model/best_simplified.onnx', confidence_thres=0.9, iou_thres=0.5)
# 2. Load the model weights (only needed once, at import time).
detector.load_model()

# Connect to the game machine over VNC; frames arrive via this queue.
vnc = VNC("192.168.29.111", "5900", password="000000")
frame_queue = vnc.connect()

# Shared event loop used to drive the async key-press movement coroutines.
loop = asyncio.get_event_loop()


def is_cuda_available():
    # NOTE(review): despite the boolean-sounding name, this returns whatever
    # detector.get_provider_type() yields — presumably an ONNX Runtime
    # provider identifier rather than a bool; confirm how callers use it.
    return detector.get_provider_type()


def move_towards_target(start_pos, end_pos, rise, speed):
    """Move the hero from start_pos toward end_pos using timed key holds.

    `rise` is added to the start y coordinate before computing the delta;
    `speed` scales down the hold durations. Returns 400 as an error code
    when start_pos is missing; otherwise runs the movement and returns None.
    """
    # Guard clause: without a hero position there is nothing to move.
    if start_pos is None:
        return 400

    sx, sy = start_pos
    ex, ey = end_pos

    # Deltas from the (vertically adjusted) start to the target.
    dx = ex - sx
    dy = ey - (sy + rise)

    horiz_key = keyboard.Right if dx > 0 else keyboard.Left
    vert_key = keyboard.Down if dy > 0 else keyboard.Up

    # Hold times derived from pixel distance and a speed-based divisor.
    x_time = abs(dx) / ((speed + 100) * 2.6)
    y_time = abs(dy) / ((speed + 100) * 1.1)
    print(f"********预计移动x:{x_time}   y:{y_time}")

    loop.run_until_complete(move_async(x_time, y_time, horiz_key, vert_key))


async def move_async(x_time, y_time, horiz_key, vert_key):
    """Run horizontal and vertical movement concurrently.

    The horizontal task is scheduled first; after a short pause the vertical
    task is started, so horizontal motion begins slightly ahead of vertical.
    Both tasks are awaited before returning.
    """
    # Start the horizontal movement task (do not await it yet).
    horizontal_task = asyncio.create_task(move_horizontal(horiz_key, x_time))
    # BUG FIX: the original used time.sleep(0.03) here, which blocks the
    # event loop and prevents horizontal_task from actually running during
    # the delay. asyncio.sleep yields control so the task can start.
    await asyncio.sleep(0.03)
    # Start the vertical movement task.
    vertical_task = asyncio.create_task(move_vertical(vert_key, y_time))

    # Wait for both movements to finish.
    await asyncio.gather(horizontal_task, vertical_task)


async def move_horizontal(key, duration):
    """Drive horizontal movement over VNC.

    Sequence: tap the key once, wait a short randomized pre-press delay,
    then hold the key and release it after roughly `duration` seconds.
    Very short moves (duration < 0.21 s) release immediately after the
    hold begins.
    """
    # Randomized pre-press delay (50–100 ms) so input timing looks human.
    pre_key_time = random.uniform(0.05, 0.1)
    start = time.time()
    # Tap the key once first.
    vnc.key_press(key)
    # BUG FIX: the original called time.sleep even though its own comment
    # said to use asyncio.sleep — time.sleep blocks the event loop and
    # serializes the supposedly-parallel vertical movement.
    await asyncio.sleep(pre_key_time)
    # Hold the key down.
    vnc.key_down(key)
    # BUG FIX: time.time() - start already includes the pre-press delay;
    # the original added pre_key_time again, double-counting it.
    elapsed = time.time() - start
    # Very short total duration: release immediately.
    if duration < 0.21:
        print(f"水平移动时间短，直接弹起")
        vnc.key_up(key)
        return
    # Hold time, reduced by a third of the setup time (empirical tuning);
    # clamped at zero so a slow setup can never produce a negative sleep.
    stop_time = max(0.0, duration - (elapsed / 3))
    print(f"水平移动停止时间: {stop_time:.4f}秒")
    # Keep holding without blocking the loop, then release.
    await asyncio.sleep(stop_time)
    vnc.key_up(key)


async def move_vertical(key, duration):
    """Hold `key` for `duration` seconds over VNC, then release it.

    A non-positive duration is a no-op.
    """
    if duration <= 0:
        return
    # Hold the key down.
    vnc.key_down(key)
    # BUG FIX: the original used time.sleep, which blocks the event loop
    # and stalls the concurrently running horizontal-movement task.
    await asyncio.sleep(duration)
    # Release the key.
    vnc.key_up(key)


def clear_cuda_cache(verbose: bool = False):
    """Run a garbage-collection pass to release Python-held memory.

    NOTE(review): despite the name, this only calls gc.collect(); it does
    not invoke any CUDA-specific routine, so GPU memory held by the ONNX
    runtime is not explicitly freed here — confirm whether that is needed.

    Args:
        verbose: when True, print a confirmation message (default False).
    """
    # BUG FIX: collect first, then report — the original printed the
    # "cleared" message before any collection had actually happened.
    gc.collect()
    if verbose:
        print("✅ 已清除 CUDA 显存缓存。")


def window_capture_win():
    # Grab the current frame of the game window from the VNC connection.
    return vnc.capture()


# Class-id → label mapping for the YOLO detector's output classes.
class_mapping = {
    0: "hero",
    1: "monster",
    2: "goods",
    3: "boos",  # NOTE(review): probably a typo for "boss"; kept to match the trained model's labels
    4: "door",
    5: "brand",
    6: "again"
}

# Digit-recognition lookup 【host-machine screenshots】: each key is the
# concatenated per-column bright-pixel counts of a glyph (see
# calculateMovingSpeed), mapped to the digit character it represents.
mapping = {
    '62226': '0',
    '281': '1',
    '43333': '2',
    '22335': '3',
    '22482': '4',
    '63334': '5',
    '53334': '6',
    '1532': '7',
    '44335': '8',
    '43335': '9',
    '1': '.'
}

# mapping = {
#     '62226': '0',
#     '281': '1',
#     '43333': '2',
#     '22335': '3',
#     '13282': '4',
#     '53334': '5',
#     '63343': '6',
#     '21631': '7',
#     '53335': '8',
#     '34336': '9',
#     '1': '.'
# }

"""
    识别移动速度
"""


def calculateMovingSpeed(img):
    """Read the on-screen movement-speed number from an image crop.

    Converts the crop to grayscale, counts bright pixels (>= 85) per
    column, splits the counts into glyph runs separated by empty columns,
    and translates each run's count-signature through `mapping` into a
    digit character. Returns the concatenated digits as a string.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Per-column count of pixels at or above the brightness threshold.
    column_counts = np.sum(gray >= 85, axis=0)

    digits = []
    run = []

    def flush():
        # Close the current glyph run and translate it if it is known.
        if run:
            signature = ''.join(str(c) for c in run)
            digit = mapping.get(signature)
            if digit is not None:
                digits.append(digit)
            run.clear()

    for count in column_counts:
        if count == 0:
            flush()
        else:
            run.append(count)
    # The final glyph may not be followed by an empty column.
    flush()
    return ''.join(digits)


"""
    疲劳值检测推算
"""


def find_pl(image):
    """Estimate the fatigue ("疲劳") value from a small HUD image.

    Crops three fixed digit regions, converts them to grayscale pixel
    vectors, and classifies each with a pre-trained scikit-learn model.

    Args:
        image: HUD region as a numpy array (convertible via PIL).

    Returns:
        The recognized non-negative digits joined as an int, or -1 when
        no digit was recognized (note: -1, not 0, is the sentinel).
    """
    img = Image.fromarray(image).convert('L')
    # Fixed digit positions within the fatigue HUD crop.
    boxes = [(4, 2, 7, 7), (9, 2, 12, 7), (14, 2, 17, 7)]
    x = [img.crop(box).getdata() for box in boxes]
    # PERF FIX: the original reloaded the .pkl model from disk on every
    # call; cache it on the function object so it is loaded only once.
    model = getattr(find_pl, "_model", None)
    if model is None:
        model = joblib.load(r'./model/pl.pkl')
        find_pl._model = model
    y = model.predict(x)
    filtered = [str(n) for n in y if n >= 0]
    return int("".join(filtered)) if filtered else -1


# Normalize raw detector output using the numeric class ids directly.
def process_detection_results(results):
    """Convert detector dicts into (class_id, [center_x, bottom_y]) tuples."""
    formatted = []
    for det in results:
        anchor = [det['center'][0], det['bottom']]
        formatted.append((int(det['class_id']), anchor))
    return formatted


# Template-matching helper.
def template_picture(img, template, threshold: float, flags: str):
    """Grayscale normalized-correlation template match.

    Args:
        img: scene image (cv2/numpy BGR array).
        template: template image object (cv2 array, not a path).
        threshold: minimum score to count as a match.
        flags: label used only in the diagnostic print.

    Returns:
        (True, best_location) when the best score reaches threshold,
        (False, best_score) when it does not,
        (False, None) when OpenCV raises an error.
    """
    try:
        # Keep OpenCV's SIMD optimizations enabled (the default).
        cv2.setUseOptimized(True)
        scene_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        tmpl_gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
        scores = cv2.matchTemplate(scene_gray, tmpl_gray, cv2.TM_CCOEFF_NORMED)
        _, max_val, _, max_loc = cv2.minMaxLoc(scores)
        print(f"置信度：{threshold}、最大值：{max_val}、预测位置：{max_loc}====【{flags}】")
        if max_val >= threshold:
            return True, max_loc
        return False, max_val
    except cv2.error:
        return False, None


def match_templates_with_loaded_templates(main_img, templates, threshold=0.01):
    """Match several pre-loaded templates against one main image.

    Args:
        main_img (np.ndarray): main image data (BGR).
        templates (dict[str, np.ndarray]): {path: image object}.
        threshold (float): match score threshold (TM_CCOEFF_NORMED, so
            higher scores are better matches).

    Returns:
        list of dict: one result per template with keys 'template',
        'position' (best location, or None below threshold), 'score',
        'time_ms' and 'found'. NOTE(review): 'time_ms' actually stores
        elapsed *seconds*; key name kept for caller compatibility.

    Raises:
        ValueError: when main_img is None.
    """
    if main_img is None:
        raise ValueError("主图不能为空")

    results = []
    # Keep OpenCV's SIMD optimizations enabled (the default; disabling
    # them roughly doubles match time).
    cv2.setUseOptimized(True)
    # The main image is loop-invariant: convert it to grayscale once
    # instead of once per template.
    img_gray = cv2.cvtColor(main_img, cv2.COLOR_BGR2GRAY)

    for path, template_img in templates.items():
        if template_img is None:
            results.append({
                'template': path,
                'error': f"模板图为空：{path}"
            })
            continue

        # BUG FIX: the original assigned back to the `threshold` parameter,
        # so once the special "./model/i.png" template was processed every
        # LATER template silently inherited the 0.4 threshold. Use a
        # per-template local instead.
        tpl_threshold = 0.4 if path in ["./model/i.png"] else threshold

        s = time.time()
        template_gray = cv2.cvtColor(template_img, cv2.COLOR_BGR2GRAY)
        res = cv2.matchTemplate(img_gray, template_gray, cv2.TM_CCOEFF_NORMED)
        elapsed = time.time() - s
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        print(f"小地图检测：地址：{path}、位置：{max_loc}、置信度：{tpl_threshold}、得分：{max_val}")
        results.append({
            'template': path,
            'position': max_loc if max_val > tpl_threshold else None,
            'score': float(max_val),
            'time_ms': float(elapsed),
            'found': max_val > tpl_threshold
        })

    return results


# Capture the window and return detection info for the current frame.
def get_win_image_info():
    """Capture the game window and run YOLO detection on it.

    Returns:
        (detections, img): detections is the process_detection_results()
        output; img is whatever detector.infer() returns as its first
        element (presumably the processed/annotated frame — confirm).
    """
    image = window_capture_win()
    # OpenCV uses BGR by default while the screenshot may be BGRA; any
    # alpha handling is assumed to happen upstream or inside the detector.
    # stat_time = time.time()
    # Run YOLO object detection on the captured frame.
    img, results = detector.infer(image)
    # print("推理耗时: {:.2f} ms".format((time.time() - stat_time) * 1000))
    # Normalize the raw detections into (class_id, [x, bottom]) tuples.
    return process_detection_results(results), img


def get_image_info(image):
    """Run YOLO detection on `image` and return normalized detections."""
    # The annotated image returned by infer() is not needed here.
    _, detections = detector.infer(image)
    return process_detection_results(detections)


def extract_player_pos(datas):
    """Return (x, y) of the first hero detection (class 0), or None."""
    return next(((x, y) for class_id, (x, y) in datas if class_id == 0), None)


def extract_door_pos(datas):
    """Return (x, y) of the first door detection (class 4), or None."""
    return next(((x, y) for class_id, (x, y) in datas if class_id == 4), None)


def extract_goods_pos(datas):
    """Return (x, y) of the first goods detection (class 2), or None."""
    return next(((x, y) for class_id, (x, y) in datas if class_id == 2), None)


def extract_monster_pos(datas):
    """Return (closest monster point, approach direction) or (None, None)."""
    point, direction = find_closest_point(datas)
    if point:
        return point, direction
    # No hero or no monsters detected.
    return None, None


def get_mini_map(self):
    """Capture the window, crop the minimap region, and match its templates.

    Returns (template match results, full window frame).
    """
    frame = window_capture_win()
    # Fixed minimap region (left, top, width, height) within the window.
    left, top, width, height = 670, 56, 123, 54
    crop = frame[top:top + height, left:left + width]
    matches = match_templates_with_loaded_templates(crop, self.mini_map_templates, threshold=0.9)
    return matches, frame


def get_mini_map_img(self, image):
    """Locate the player marker on the minimap and return its grid cell."""
    # Fixed minimap region: (left, top, width, height) within the window.
    box = (670, 56, 123, 54)
    img = image[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]
    minimaps = match_templates_with_loaded_templates(img, self.mini_map_templates, threshold=0.3199)
    # Pick the "i" (player) marker among the matched templates.
    i_room = next((d for d in minimaps if d['template'].endswith('i.png') and d['found']), None)
    # NOTE(review): the failure path returns a 3-tuple while the success
    # path returns a 2-tuple (grid_x, grid_y) — callers must handle both
    # shapes; confirm this asymmetry is intentional.
    if not i_room:
        return 'unknown', None, None
    # 18 presumably equals the pixel size of one minimap room cell — verify.
    return int(i_room['position'][0]) // 18, int(i_room['position'][1]) // 18


def get_mini_map_type(self, mini_map_img):
    """Match the room-type templates against a minimap crop."""
    return match_templates_with_loaded_templates(
        mini_map_img, self.mini_map_temp_types, threshold=0.99
    )


def get_next_room_direction(data):
    """Check for the player ('i.png') and next-room ('n.png') markers.

    Returns ('unknown', None, None) with no player marker,
    ('unknown', (ix, iy), None) with a player but no next-room marker,
    and (True, (ix, iy), True) when both were found. (ix, iy) is the
    player's minimap grid cell (18 px per cell).
    """
    def _first_found(suffix):
        # First match whose template path ends with `suffix` and was found.
        for entry in data:
            if entry['template'].endswith(suffix) and entry['found']:
                return entry
        return None

    player = _first_found('i.png')
    if player is None:
        return 'unknown', None, None
    ix = player['position'][0] // 18
    iy = player['position'][1] // 18

    if _first_found('n.png') is None:
        return 'unknown', (ix, iy), None
    return True, (ix, iy), True

def get_current_root_index(data):
    """Return the player's minimap grid cell (x, y) from match results.

    Falls back to the 3-tuple sentinel ('unknown', None, None) when no
    found 'i.png' (player) marker is present, matching the original API.
    """
    for entry in data:
        if entry['template'].endswith('i.png') and entry['found']:
            px, py = entry['position'][0], entry['position'][1]
            # 18 px per minimap room cell.
            return px // 18, py // 18
    return 'unknown', None, None


def mouse_to_click(win_left, win_top, target_x, target_y):
    # Translate window-relative target coordinates to screen coordinates
    # and perform a left click over VNC.
    vnc.mouse_move(win_left + int(target_x), win_top + int(target_y))
    time.sleep(0.5)  # let the remote cursor settle before clicking
    vnc.mouse_left_click()
    time.sleep(1.5)  # give the game UI time to react to the click


def find_closest_point(data):
    """Find the monster (label 1) nearest to the hero (label 0).

    Args:
        data: [(label, (x, y)), ...] detection tuples.

    Returns:
        ((target_x, target_y), direction_key): a point offset 50 px
        toward the hero's side of the closest monster, plus the key to
        press to approach it; or (None, None) when there is no hero or
        no monster.
    """
    # Hero position (assumes at most one label-0 detection matters).
    hero = next((point for label, point in data if label == 0), None)
    if hero is None:
        return None, None

    # All monster positions.
    monsters = [point for label, point in data if label == 1]
    if not monsters:
        return None, None

    # Nearest monster by Euclidean distance to the hero.
    closest = min(monsters, key=lambda p: math.hypot(p[0] - hero[0], p[1] - hero[1]))

    # Stop 50 px short of the monster, approaching from the hero's side.
    if hero[0] - closest[0] > 0:
        # Monster is to the hero's left: move Left, stop to its right.
        return (closest[0] + 50, closest[1]), keyboard.Left
    # BUG FIX: the original used closest_point[1] (the y coordinate) for
    # the x component here; it must be the x coordinate, mirroring the
    # branch above.
    return (closest[0] - 50, closest[1]), keyboard.Right


def extract_door_pos_value(datas, sort_by_x=True, select_max=False):
    """Select an extreme door (class 4) point and report the door count.

    Args:
        datas: [(class_id, [x, y]), ...] detection tuples.
        sort_by_x: compare by x coordinate when True, by y when False.
        select_max: take the largest coordinate instead of the smallest.

    Returns:
        ((x, y), count) for the chosen door, or (None, 0) when no door
        was detected.
    """
    doors = [(x, y) for class_id, (x, y) in datas if class_id == 4]
    if not doors:
        return None, len(doors)

    # Pick the comparison axis and the extremum function, then choose.
    axis = 0 if sort_by_x else 1
    chooser = max if select_max else min
    picked = chooser(doors, key=lambda p: p[axis])
    return picked, len(doors)


if __name__ == '__main__':
    # Smoke test: sample minimap template-match results (shaped like the
    # output of match_templates_with_loaded_templates) used to locate the
    # player marker's grid cell.
    data = [{'template': './model/b.png', 'position': (108, 0),
             'score': 0.999973475933075, 'time_ms': 0.0, 'found': True},
            {'template': './model/i.png', 'position': (36, 0),
             'score': 0.9333259463310242, 'time_ms': 0.0, 'found': True},
            {'template': './model/n.png', 'position': (40, 3),
             'score': 0.6735045909881592, 'time_ms': 0.0012195110321044922, 'found': True}]
    i_position = get_current_root_index(data)
    print(f"小人物位置：{i_position}")