# -*- coding: utf-8 -*-
# @Time : 2022/10/28 17:56
# @Author : cxk
# @File : z_detect5.py
# @Software : PyCharm

# 20230819 CP：添加鼠标下键开关autoaim功能，以及end健结束功能；
# 20230820 CP：添加鼠标上键切换扮演t还是ct；调整autoaim观察窗口位置在侧面便于观察；

import multiprocessing
import sys
import ctypes

import argparse

import pynput
import win32con
import win32api

from mss import mss
from pynput import mouse

from z_captureScreen import capScreen

from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.utils import *

from z_ctypes import SendInput, mouse_input
from multiprocessing import Process
from threading import Thread
# Key names for the multiprocessing-shared dict; also exported as
# module-level constants so the worker functions can index `data` with them.
end, box, torct, aim = 'end', 'box', 'torct', 'aim'

# Initial state pushed into the shared dict before the workers start.
init = {
    end: False,   # exit flag: set True when End is pressed; workers stop once they see it
    box: 'box',
    torct: True,  # True = play as T (default), False = play as CT
    aim: False,   # auto-aim on/off switch
}

# Opt into per-monitor DPI awareness so captured pixel coordinates match
# the real screen coordinates used for mouse movement (Windows only).
PROCESS_PER_MONITOR_DPI_AWARE = 2
ctypes.windll.shcore.SetProcessDpiAwareness(PROCESS_PER_MONITOR_DPI_AWARE)


def local_mouse(data):
    """Listen for the mouse side buttons and flip shared-state toggles.

    x2 (upper side button) swaps the role we play (T <-> CT);
    x1 (lower side button) toggles the auto-aim switch.
    Runs until the process is terminated externally.
    """
    def on_click(x, y, button, pressed):
        if not pressed:
            return
        if button == pynput.mouse.Button.x2:
            # upper side button: swap which team we act as
            data[torct] = not data[torct]
            print(f'act as  {"t" if data[torct] else "ct"}')
        elif button == pynput.mouse.Button.x1:
            # lower side button: enable/disable auto-aim
            data[aim] = not data[aim]
            print(f'Switch AutoAim: {"enable" if data[aim] else "disable"}')

    with pynput.mouse.Listener(on_click=on_click) as listener:
        listener.join()


def keyboard(data):
    """Listen for the End key; releasing it sets the shared exit flag."""
    def on_release(key):
        if key != pynput.keyboard.Key.end:
            return None
        data[end] = True  # signal the other processes to shut down
        return False      # returning False stops this listener

    with pynput.keyboard.Listener(on_release=on_release) as listener:
        listener.join()

def pre_process(img0, img_sz, half, device):
    """Convert a captured frame into a model-ready tensor.

    img0: frame from capScreen(), HWC layout, BGR channel order.
    Returns a contiguous NCHW tensor on `device`, scaled to [0, 1],
    in FP16 when `half` is True, otherwise FP32.
    """
    # Letterbox-resize to the inference size, then BGR -> RGB and HWC -> CHW.
    resized = letterbox(img0, new_shape=img_sz)[0]
    chw = np.ascontiguousarray(resized[:, :, ::-1].transpose(2, 0, 1))

    tensor = torch.from_numpy(chw).to(device)
    tensor = tensor.half() if half else tensor.float()  # uint8 -> fp16/32
    tensor /= 255.0  # 0-255 -> 0.0-1.0
    if tensor.ndimension() == 3:
        tensor = tensor.unsqueeze(0)  # add the batch dimension
    return tensor


def inference_img(img, model, augment, conf_thres, iou_thres, classes, agnostic):
    """Run the model on one preprocessed batch and return NMS-filtered detections."""
    raw = model(img, augment=augment)[0]
    # non_max_suppression keeps only `classes` above conf_thres, merged by iou_thres.
    return non_max_suppression(raw, conf_thres, iou_thres, classes, agnostic)


def calculate_position(xyxy):
    """Return the integer (x, y) center of an xyxy bounding box.

    xyxy: indexable of four values (left, top, right, bottom).
    """
    left, top, right, bottom = xyxy[0], xyxy[1], xyxy[2], xyxy[3]
    # Keep the original truncating arithmetic: int((hi - lo) / 2 + lo).
    cx = int((right - left) / 2 + left)
    cy = int((bottom - top) / 2 + top)
    return cx, cy

def view_imgs(img0):
    """Show the annotated frame in a preview window; pressing 'q' exits the process."""
    preview = cv2.resize(img0, (640, 480))
    cv2.imshow('CSGO autoaim Window', preview)
    # Park the window at x=1280 so it sits beside the captured game region.
    cv2.moveWindow('CSGO autoaim Window', 1280, 0)
    if (cv2.waitKey(1) & 0xFF) == ord('q'):
        cv2.destroyAllWindows()
        exit(0)


def move_mouse(mouse_pynput, aim_persons_center):
    """Move the cursor to the target center nearest the current mouse position.

    mouse_pynput: pynput mouse.Controller (used only to read the position).
    aim_persons_center: list of [x, y] target centers; no-op when empty.
    Distance metric is Euclidean; ties keep the first-listed target.
    """
    if not aim_persons_center:
        return

    current_x, current_y = mouse_pynput.position

    # Nearest target via stdlib min() instead of a hand-rolled best-so-far
    # loop. Squared distance is used — sqrt is monotonic, so the argmin
    # (and first-wins tie-breaking) is identical to the original.
    nearest = min(
        aim_persons_center,
        key=lambda p: (p[0] - current_x) ** 2 + (p[1] - current_y) ** 2,
    )

    # SendInput absolute coordinates are normalized to the 0..65535 range.
    tx = int(nearest[0] / win32api.GetSystemMetrics(0) * 65535)
    ty = int(nearest[1] / win32api.GetSystemMetrics(1) * 65535)
    SendInput(mouse_input(win32con.MOUSEEVENTF_MOVE | win32con.MOUSEEVENTF_ABSOLUTE, tx, ty))

    # Uncomment to fire immediately after moving:
    # mouse_pynput.click(mouse.Button.left, 1)

class AimYolo:
    """YOLO-based screen auto-aim: capture the screen, detect players, and
    move the mouse to the nearest enemy target.

    Constructed from the shared options dict; `run()` loops until the
    process is terminated externally.
    """

    def __init__(self, opt):
        # opt is the manager dict merged from `init` and the argparse options.
        self.data = opt
        self.weights = opt.get('weights')
        self.img_size = opt.get('img_size')
        self.conf_thres = opt.get('conf_thres')
        self.iou_thres = opt.get('iou_thres')
        self.view_img = opt.get('view_img')
        self.agnostic_nms = opt.get('agnostic_nms')
        self.augment = opt.get('augment')
        # Screen region to capture (top-left area of the primary monitor).
        self.bounding_box = {'left': 0, 'top': 0, 'width': 1280, 'height': 800}

        # Load the model; half precision only supported on CUDA.
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.half = self.device.type != 'cpu'
        self.model = attempt_load(self.weights, map_location=self.device)  # load FP32 model
        self.model = self.model.to(self.device)
        self.img_size = check_img_size(self.img_size, s=self.model.stride.max())  # check img_size
        if self.half:
            self.model.half()  # to FP16

        # Class names and a random display color per class.
        # BUGFIX: was `self.model.modules.names` — `modules` is a method, so
        # this raised AttributeError whenever the model was DataParallel-wrapped;
        # the wrapped model lives in `.module`.
        self.names = self.model.module.names if hasattr(self.model, 'module') else self.model.names
        self.colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(self.names))]

    @torch.no_grad()
    def run(self, data):
        """Main capture -> detect -> aim loop.

        data: the shared dict; the `torct` and `aim` keys may be flipped by
        the listener processes at any time.
        """
        import time  # local import: only needed for the idle back-off below

        img_sz = self.img_size

        # Warm up CUDA with one dummy forward pass.
        img = torch.zeros((1, 3, self.img_size, self.img_size), device=self.device)
        _ = self.model(img.half() if self.half else img) if self.device.type != 'cpu' else None

        # mss handle for screen capture.
        sct = mss()
        print("The mss object created.")

        # pynput controller used to read/move the real mouse.
        mouse_control = mouse.Controller()
        print("The mouse controller created.")

        # Initialize outside the loop so a frame with no detections still
        # has a list to test below.
        aim_persons_center_head = []
        while True:
            # Pick which classes to shoot: classes 1/2 are terrorists (T),
            # classes 0/3 are counter-terrorists (CT). Playing as T means
            # we aim at CTs, and vice versa.
            if data.get('torct') == True:
                clisttoshoot = [0, 3]
            else:
                clisttoshoot = [1, 2]

            if data.get(aim) is False:  # auto-aim switched off: skip this frame
                # BUGFIX: the bare `continue` busy-waited at 100% CPU while
                # idle; back off briefly before polling the switch again.
                time.sleep(0.01)
                continue

            img0 = capScreen(sct, self.bounding_box)  # HWC and BGR

            img = pre_process(img0=img0, img_sz=img_sz, half=self.half, device=self.device)

            t1 = torch_utils.time_synchronized()
            pred = inference_img(img=img, model=self.model, augment=self.augment, conf_thres=self.conf_thres,
                                 iou_thres=self.iou_thres, classes=clisttoshoot, agnostic=self.agnostic_nms)
            t2 = torch_utils.time_synchronized()

            # Single-image batch, so detections are at index 0.
            det = pred[0]

            s = ''
            s += '%gx%g ' % img.shape[2:]  # print string

            if det is not None and len(det):
                # Rescale boxes from the letterboxed size back to img0 size.
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

                # Summarize per-class counts into the print string.
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += '%g %ss, ' % (n, self.names[int(c)])

                # Collect the centers of the classes we want to shoot.
                aim_persons_center_head = []
                for *xyxy, conf, cls in det:
                    # Draw the bbox on the preview frame.
                    label = '%s %.2f' % (self.names[int(cls)], conf)
                    plot_one_box(xyxy, img0, label=label, color=self.colors[int(cls)], line_thickness=2)
                    center_x, center_y = calculate_position(xyxy)
                    if int(cls) in clisttoshoot:
                        aim_persons_center_head.append([center_x, center_y])

            # Aim at the nearest collected target center, if any.
            if len(aim_persons_center_head) > 0:
                move_mouse(mouse_control, aim_persons_center_head)
                aim_persons_center_head = []

                # NOTE(review): the preview only refreshes on frames that had
                # targets; move this out of the `if` to see every frame.
                if self.view_img:
                    view_imgs(img0=img0)

def detect(data):
    """Build the AimYolo detector and run its capture/aim loop."""
    AimYolo(data).run(data)

def _str2bool(value):
    """Parse a CLI string into a bool; truthy spellings are true/t/1/yes/y."""
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('true', 't', '1', 'yes', 'y')


def parseArgs():
    """Parse command-line options and return the argparse Namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='weights/best_200.pt', help='model.pt path(s)')
    parser.add_argument('--img-size', type=int, default=1280, help='inference size (pixels)')
    # Raise this threshold if some objects are misclassified.
    parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    # Which role we play (True = T, False = CT).
    # BUGFIX: was `type=str, default=True` — any CLI-supplied value arrived as
    # a *string*, so the `data.get('torct') == True` check downstream always
    # failed; the converter yields a real bool while the default stays True.
    parser.add_argument('--torct', type=_str2bool, default=True, help='True or False')
    opt = parser.parse_args()
    return opt

if __name__ == '__main__':
    # On Windows, multiprocessing requires this as the first call in main.
    multiprocessing.freeze_support()

    # Process-safe shared state: defaults first, then CLI options on top.
    manager = multiprocessing.Manager()
    data = manager.dict()
    data.update(init)
    data.update(vars(parseArgs()))
    # Resulting keys: end, box, torct, aim, weights, img_size, conf_thres,
    # iou_thres, view_img, agnostic_nms, augment.

    # Run the mouse listener, keyboard listener, and detector each in its
    # own process so the input hooks never block inference.
    mouse_proc = Process(target=local_mouse, args=(data,))
    key_proc = Process(target=keyboard, args=(data,))
    detect_proc = Process(target=detect, args=(data,))
    mouse_proc.start()
    key_proc.start()
    detect_proc.start()

    # Wait on the keyboard process (it exits when End is pressed). Without
    # this join, tearing down the manager early raises
    # "AttributeError: 'ForkAwareLocal' object has no attribute 'connection'".
    key_proc.join()

    # The listener/detector loops never observe the exit flag on their own,
    # so they must be terminated forcibly.
    mouse_proc.terminate()
    detect_proc.terminate()
