"""
-*- coding:utf-8 -*-
@Time:2025/8/4 15:23
@Author: viawang
@Email:  939654454@qq.com
@Wechat:bluesky___9999
@File:jumpjump_yolov8.py
"""

import math
import os
import random
import time
import pygetwindow as gw
import cv2
import mss
import pyautogui
import numpy as np
from loguru import logger
import torch
from ultralytics import YOLO


class JumpJumpV8:
    """Auto-player for the WeChat "Jump Jump" mini-game using a YOLOv8 detector.

    Workflow: click the mini-program icon found on screen via template
    matching, grab the game window with mss, detect the jumper and the next
    platform with YOLO, estimate the jump distance in pixels, and hold the
    mouse button for a duration proportional to that distance.
    """

    def __init__(self, model_path):
        """
        :param model_path: path to the trained YOLOv8 weights file (.pt)
        """
        self.imgdir = r"./imgs"
        # Template images for pyautogui.locateOnScreen, one per file in ./imgs.
        self.paths = [os.path.join(self.imgdir, imgpath) for imgpath in os.listdir(self.imgdir)]
        # Bookkeeping counters (reserved for success/failure statistics).
        self.success_count = 0
        self.fail_count = 0
        self.last_jump_success = False
        # NOTE(review): the device is only reported here; ultralytics' YOLO
        # selects its own device when inference runs.
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print(f'正在使用的设备加速是: {device}')
        self.model = YOLO(model_path)

    def common_action(self, imgpath, confidence=0.8, duration=None, sleeptime=None, click=True):
        """
        Locate a template image on screen and optionally click a random
        point inside the matched box.

        Bug fix: the original defaults were ``random.uniform(0.5, 1)``,
        which Python evaluates ONCE at function-definition time, so every
        call reused the same "random" delay. ``None`` now means "draw a
        fresh random value per call"; explicit arguments behave as before.

        :param imgpath: path of the template image to search for
        :param confidence: match confidence passed to pyautogui
        :param duration: mouse-move duration; None -> random 0.5-1s per call
        :param sleeptime: pre-action sleep; None -> random 0.5-1s per call
        :param click: whether to click the located region
        :return: the located Box, or None when the template is not found
        """
        if duration is None:
            duration = random.uniform(0.5, 1)
        if sleeptime is None:
            sleeptime = random.uniform(0.5, 1)
        pyautogui.sleep(sleeptime)
        position = pyautogui.locateOnScreen(imgpath, confidence=confidence)
        if click and position:
            # Click a random point inside the box to look less robotic.
            randomx = position.left + random.randint(0, position.width)
            randomy = position.top + random.randint(0, position.height)
            pyautogui.moveTo(randomx, randomy, duration=duration)
            pyautogui.click(randomx, randomy, duration=random.uniform(0.001, 0.01))
        return position

    def filterpath(self, keyword):
        """
        Find the template image path matching a keyword.

        :param keyword: substring to look for in the candidate paths
        :return: the single matching path; the first match when several
                 paths qualify; None (implicitly) when nothing matches
        """
        finalpath = [path for path in self.paths if keyword in path]
        if len(finalpath) == 1:
            return finalpath[0]
        elif len(finalpath) == 0:
            logger.error(f"没有找到目标图片路径...")
        else:
            logger.debug("符合条件的图片路径有多个，默认返回了第一个图片路径...")
            return finalpath[0]

    def click_miniprogram(self, miniprogrampic):
        """Click the mini-program icon given its template image path."""
        self.common_action(miniprogrampic)

    def screencapture(self, left, top, width, height, index=None):
        """
        Grab a screen region and return it as a BGR numpy array.

        :param left/top/width/height: region to capture, in screen pixels
        :param index: unused; kept for backward compatibility with callers
        :return: HxWx3 uint8 array (alpha channel dropped — YOLOv8 does not
                 accept 4-channel images)
        """
        with mss.mss() as sct:
            monitor = {'top': top, 'left': left, 'width': width, 'height': height, 'mon': 1}
            sct_img = sct.grab(monitor)
            imgnumpy = np.array(sct_img)
            imgnumpy = imgnumpy[..., :3]  # drop alpha: keep only 3 channels
            return imgnumpy

    def getobjects_coords(self, imgnumpy):
        """
        Detect the jumper and the newest platform in a frame.

        Class 0 is the jumper: we keep the one whose bottom edge (y2) is
        highest on screen and return its bottom-center point. Class 1 is a
        platform: we keep the one whose center is highest on screen.

        :param imgnumpy: HxWx3 frame as produced by ``screencapture``
        :return: (jumper_bottom, platform_center) tuples of (x, y) floats;
                 either may be None when that object was not detected
        """
        results = self.model(imgnumpy)
        boxes = results[0].boxes
        # Track the smallest y seen so far; start at +inf so any box wins.
        min_jumper_y2, min_panel_cy = float('inf'), float('inf')
        best_jumper, best_jumppannel = None, None
        for box in boxes:
            class_id = box.cls.cpu().numpy().astype(int)[0]
            x1, y1, x2, y2 = box.xyxy.cpu().numpy()[0].astype(int)
            center_x = (x1 + x2) / 2  # horizontal center of the box
            if class_id == 0:
                # Jumper: keep the candidate whose bottom edge is topmost.
                if y2 < min_jumper_y2:
                    min_jumper_y2 = y2
                    best_jumper = (center_x, y2)
            elif class_id == 1:
                # Platform: keep the candidate whose center is topmost.
                center_y = (y1 + y2) / 2
                if center_y < min_panel_cy:
                    min_panel_cy = center_y
                    best_jumppannel = (center_x, center_y)
        logger.debug(f"最佳跳人（y2最靠上）底部坐标：{best_jumper}, 最新跳板（最靠上）中心坐标：{best_jumppannel}")
        return best_jumper, best_jumppannel

    def cal_jumpdistance(self, jumper_center, jumppanel_center):
        """
        Euclidean pixel distance between the jumper and the target platform.

        :param jumper_center: (x, y) bottom-center of the jumper
        :param jumppanel_center: (x, y) center of the target platform
        :return: distance in pixels (float)
        """
        x1, y1 = jumper_center
        x2, y2 = jumppanel_center
        distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
        logger.warning(f"需要跳远的距离：{distance}")
        return distance

    def visualize_detection(self, imgnumpy, jumper_center, jumppanel_center, distance):
        """
        Draw detection markers and the planned jump line on a copy of the
        frame, and show it in an OpenCV window.

        :return: True when ESC was pressed in the preview window
        """
        vis_img = imgnumpy.copy()
        if jumper_center:
            x, y = jumper_center
            cv2.circle(vis_img, (int(x), int(y)), 5, (0, 0, 255), -1)
            cv2.putText(vis_img, "JUMPER", (int(x) + 10, int(y) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        if jumppanel_center:
            x, y = jumppanel_center
            cv2.circle(vis_img, (int(x), int(y)), 5, (0, 255, 0), -1)
            cv2.putText(vis_img, "TARGET", (int(x) + 10, int(y) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        if jumper_center and jumppanel_center:
            cv2.putText(vis_img, f"Distance: {distance:.1f}px", (20, 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2, cv2.LINE_AA)
            # Draw the planned jump trajectory.
            cv2.line(vis_img,
                     (int(jumper_center[0]), int(jumper_center[1])),
                     (int(jumppanel_center[0]), int(jumppanel_center[1])),
                     (255, 0, 0), 2)

        cv2.imshow("JumpJump AutoPlay", vis_img)
        return cv2.waitKey(1) == 27  # ESC exits

    @staticmethod
    def _base_coef(distance):
        """
        Map a pixel distance to an empirically tuned press-time coefficient.

        Bug fix: the original chain used strict ``<`` on both sides of each
        range (``50 < distance < 100`` etc.), so the exact boundary values
        50/100/150/200/250 fell through every branch and silently received
        the largest coefficient (2.8). Closed upper bounds remove the gaps.

        :param distance: jump distance in pixels
        :return: coefficient used to scale the mouse-press duration
        """
        if distance <= 50:
            return 1.52
        elif distance <= 100:
            return 1.71
        elif distance <= 150:
            return 2.42
        elif distance <= 200:
            return 2.62
        elif distance <= 250:
            return 2.72
        return 2.8

    def autojump(self, distance):
        """
        Execute one jump: hold the mouse button for a time proportional to
        the distance (plus a tiny random jitter to look less robotic).

        :param distance: jump distance in pixels
        """
        pyautogui.mouseDown()
        press_time = (distance + random.uniform(0.01, 0.03)) * self._base_coef(distance) / 1000
        pyautogui.sleep(press_time)
        pyautogui.mouseUp()

    def main(self):
        """Run the bot for up to 30 minutes, or until ESC is pressed."""
        miniprogrampic = self.filterpath("miniprogrampic")  # locate the icon template by keyword
        self.click_miniprogram(miniprogrampic)  # click the on-screen icon
        # NOTE(review): assumes the mini-program window is now the active
        # window; getActiveWindow() may return None — confirm on target OS.
        curactwin = gw.getActiveWindow()
        # Crop window chrome (borders/title bar) for a cleaner capture.
        left, top, width, height = curactwin.left + 8, curactwin.top + 50, curactwin.width - 16, curactwin.height - 58
        logger.debug(f"当前小程序活动窗口的坐标信息：{left, top, width, height}")
        randomx, randomy = random.randint(left + 20, left + width - 20), random.randint(top + 20, top + height - 20)
        pyautogui.moveTo(randomx, randomy)
        index, times = 0, 0  # times counts frames where detection missed an object
        starttime = time.time()
        while time.time() - starttime <= 1800:  # run at most 30 minutes
            index += 1
            imgnumpy = self.screencapture(left, top, width, height, index)
            jumper_center, jumppanel_center = self.getobjects_coords(imgnumpy)
            # Bug fix: the original unpacked these unconditionally and
            # crashed with a TypeError whenever either object was not
            # detected; skip such frames and retry instead.
            if jumper_center is None or jumppanel_center is None:
                times += 1
                pyautogui.sleep(0.5)
                continue
            times = 0
            distance = int(self.cal_jumpdistance(jumper_center, jumppanel_center))
            # visualize_detection returns True on ESC — honor it to allow
            # a clean manual exit (the original discarded this value).
            if self.visualize_detection(imgnumpy, jumper_center, jumppanel_center, distance):
                break
            self.autojump(distance)
            pyautogui.sleep(random.uniform(1, 1.5))
        logger.success("程序正常运行完毕！")


if __name__ == '__main__':
    # Entry point: load the trained detector weights and start the bot.
    weights = r"./models/best.pt"
    bot = JumpJumpV8(weights)
    bot.main()
