import argparse
import cv2 as cv


from client import predict_garbage
from hand_data_iter.datasets import draw_bd_handpose
from models.resnet import resnet50, resnet34
from models.yolov3 import Yolov3, Yolov3Tiny
from utils.datasets import *
from utils.parse_config import parse_data_cfg
from utils.torch_utils import select_device
from utils.utils import *
from detcet import *


# True when a CUDA device is available; gates all .cuda()/synchronize() calls below.
use_cuda = torch.cuda.is_available()
# Gesture class labels (0 = "no click", 1 = "click"); values are user-facing Chinese strings.
gesture = {0: '不点击', 1: '点击'}

# Fingertip positions [x, y] (frame coordinates) for the first and second detected hand;
# mutated in-place by detect() once a "click" gesture is held long enough.
one_finger = [0, 0]
other_finger = [0, 0]


def process_data(img, img_size=416):
    """Preprocess a BGR frame for the detector.

    Letterboxes the frame to a square of side ``img_size``, converts it to
    an RGB channel-first float32 array, and scales pixel values to [0, 1].

    Args:
        img: BGR frame as an (H, W, 3) uint8 array.
        img_size: target square side length for letterboxing.

    Returns:
        A contiguous (3, img_size, img_size) float32 array in [0, 1].
    """
    boxed, _, _, _ = letterbox(img, height=img_size)
    # BGR -> RGB, then HWC -> CHW for the network.
    chw = boxed[:, :, ::-1].transpose(2, 0, 1)
    chw = np.ascontiguousarray(chw, dtype=np.float32)
    chw /= 255.0  # scale 0-255 -> 0.0-1.0
    return chw


def load_handpose_model():
    """Build and load the hand keypoint regression network.

    Parses its own command-line options (checkpoint path, backbone, number
    of outputs, GPU ids, input size) and returns them along with the model
    set to eval mode on CUDA when available, CPU otherwise.

    Returns:
        (ops, model_): the parsed argparse namespace and the loaded model.

    Raises:
        ValueError: if ``--model`` names an unsupported backbone.
    """
    parser = argparse.ArgumentParser(description=' Project Hand Pose Inference')

    parser.add_argument('--model_path', type=str, default='./weights/resnet50_2021-418.pth',
                        help='model_path')  # checkpoint path
    parser.add_argument('--model', type=str, default='resnet_50',
                        help='model : resnet_x,')  # backbone type
    parser.add_argument('--num_classes', type=int, default=42,
                        help='num_classes')  # 21 hand keypoints * (x, y) = 42 outputs
    parser.add_argument('--GPUS', type=str, default='0',
                        help='GPUS')  # visible GPU ids
    parser.add_argument('--img_size', type=tuple, default=(256, 256),
                        help='img_size')  # network input size (h, w)
    parser.add_argument('--vis', type=bool, default=True,
                        help='vis')  # visualize results
    ops = parser.parse_args()  # parse the added arguments
    os.environ['CUDA_VISIBLE_DEVICES'] = ops.GPUS
    print('use model : %s' % (ops.model))

    if ops.model == 'resnet_50':
        model_ = resnet50(num_classes=ops.num_classes, img_size=ops.img_size[0])
    elif ops.model == 'resnet_34':
        model_ = resnet34(num_classes=ops.num_classes, img_size=ops.img_size[0])
    else:
        # FIX: the original fell through with model_ unbound (NameError below).
        raise ValueError('unsupported model type: %s' % ops.model)

    device = torch.device("cuda:0" if use_cuda else "cpu")
    model_ = model_.to(device)
    model_.eval()  # inference mode
    # Load the trained weights if the checkpoint file exists.
    if os.access(ops.model_path, os.F_OK):
        chkpt = torch.load(ops.model_path, map_location=device)
        model_.load_state_dict(chkpt)
    else:
        # FIX: the original silently returned a randomly initialized model.
        print('warning: handpose checkpoint not found: %s' % ops.model_path)
    return ops, model_


def predict_handpose(img, ops, model_):
    """Run keypoint regression on a cropped hand image.

    Args:
        img: BGR hand crop as an (H, W, 3) array.
        ops: options namespace; ``ops.img_size`` is the (h, w) network input size.
        model_: keypoint network mapping a (1, 3, h, w) tensor to a flat output.

    Returns:
        (output, width, height): squeezed numpy array of normalized keypoint
        coordinates, plus the crop's original width and height in pixels.
    """
    crop_h = img.shape[0]
    crop_w = img.shape[1]

    # Resize to the network input and normalize to roughly [-0.5, 0.5).
    resized = cv2.resize(img, (ops.img_size[1], ops.img_size[0]), interpolation=cv2.INTER_CUBIC)
    normed = (resized.astype(np.float32) - 128.) / 256.

    # HWC -> CHW, add the batch dimension.
    tensor = torch.from_numpy(normed.transpose(2, 0, 1)).unsqueeze(0)
    if use_cuda:
        tensor = tensor.cuda()  # (bs, 3, h, w)

    prediction = model_(tensor.float())  # forward pass
    keypoints = np.squeeze(prediction.cpu().detach().numpy())
    return keypoints, crop_w, crop_h


def detect(
        model_path,
        cfg,
        data_cfg,
        img_size=416,
        conf_thres=0.5,
        nms_thres=0.5,
        video_path=0,
):
    """Detect hands in a video stream, regress keypoints, and react to gestures.

    A held "click" gesture (15+ consecutive frames) marks a fingertip; once
    both detected hands are marked, the rectangle between the two fingertips
    is cropped, saved, and classified via ``predict_garbage``, with results
    announced through the voice helper script.

    Args:
        model_path: path to the YOLOv3 hand-detector checkpoint (.pt).
        cfg: model-structure tag; a "-tiny" substring selects Yolov3Tiny.
        data_cfg: *.data config whose 'names' entry lists class labels.
        img_size: square network input size.
        conf_thres: detection confidence threshold.
        nms_thres: NMS IoU threshold.
        video_path: cv2.VideoCapture source (0 = webcam, path, or stream URL).

    Returns:
        False if the detector checkpoint is missing; otherwise None after the
        display loop ends (ESC pressed or the stream is exhausted).
    """
    classes = load_classes(parse_data_cfg(data_cfg)['names'])
    num_classes = len(classes)

    # Build the detector, rescaling the default 416-px anchors to img_size.
    weights = model_path
    a_scalse = 416. / img_size
    if "-tiny" in cfg:
        anchors = [(10, 14), (23, 27), (37, 58), (81, 82), (135, 169), (344, 319)]
        anchors_new = [(int(w / a_scalse), int(h / a_scalse)) for (w, h) in anchors]
        model = Yolov3Tiny(num_classes, anchors=anchors_new)
    else:
        anchors = [(10, 13), (16, 30), (33, 23), (30, 61), (62, 45), (59, 119), (116, 90), (156, 198), (373, 326)]
        anchors_new = [(int(w / a_scalse), int(h / a_scalse)) for (w, h) in anchors]
        model = Yolov3(num_classes, anchors=anchors_new)

    device = select_device()  # choose CPU / GPU

    # Load detector weights; bail out if the checkpoint is absent.
    if os.access(weights, os.F_OK):
        model.load_state_dict(torch.load(weights, map_location=device)['model'])
    else:
        print('error model not exists')
        return False
    model.to(device).eval()  # inference mode

    colors = [(v // 32 * 64 + 64, (v // 8) % 4 * 64, v % 8 * 32) for v in range(1, num_classes + 1)][::-1]
    # FIX: honor the video_path argument (the original always opened webcam 0).
    video_capture = cv2.VideoCapture(video_path)
    video_writer = None
    loc_time = time.localtime()
    str_time = time.strftime("%Y-%m-%d_%H-%M-%S", loc_time)
    save_video_path = "./video/demo_{}.mp4".format(str_time)

    # Load the hand keypoint model.
    ops, model_ = load_handpose_model()

    # Per-hand counters of consecutive frames showing the "click" gesture.
    pose = [0] * 10

    while True:
        ret, im0 = video_capture.read()
        if not ret:
            break
        t = time.time()
        img = process_data(im0, img_size)
        if use_cuda:
            torch.cuda.synchronize()
        img = torch.from_numpy(img).unsqueeze(0).to(device)
        pred, _ = model(img)  # run the detector
        if use_cuda:
            torch.cuda.synchronize()
        detections = non_max_suppression(pred, conf_thres, nms_thres)[0]  # nms
        if use_cuda:
            torch.cuda.synchronize()
        if detections is None or len(detections) == 0:
            # No hands this frame: show it and poll for ESC.
            cv2.namedWindow('image', 0)
            cv2.imshow("image", im0)
            if cv2.waitKey(1) == 27:
                break
            continue
        # Rescale boxes from the network resolution back to the frame.
        detections[:, :4] = scale_coords(img_size, detections[:, :4], im0.shape).round()
        result = []
        for res in detections:
            result.append(
                (classes[int(res[-1])], float(res[4]), [int(res[0]), int(res[1]), int(res[2]), int(res[3])]))
        if use_cuda:
            torch.cuda.synchronize()

        # Flags: whether hand 0 / hand 1 currently shows the click gesture.
        one_flag = 0
        other_flag = 0
        # Draw bounding boxes and keypoints per detection.
        for index, (*xyxy, conf, cls_conf, cls) in enumerate(detections):
            # FIX: guard against more detections than tracked gesture slots.
            if index >= len(pose):
                break
            label = '%s %.2f' % (classes[int(cls)], conf)
            xyxy = int(xyxy[0]), int(xyxy[1]) + 6, int(xyxy[2]), int(xyxy[3])
            # Draw the hand bounding box.
            plot_one_box(xyxy, im0, label=label, color=(15, 255, 95), line_thickness=3)
            img_hand = im0[xyxy[1]: xyxy[3], xyxy[0]: xyxy[2], :]
            output, img_width, img_height = predict_handpose(img_hand, ops, model_)
            # Build keypoint dict in full-frame coordinates for drawing.
            pts_hand = {}
            for i in range(int(output.shape[0] / 2)):
                x = output[i * 2 + 0] * float(img_width)
                y = output[i * 2 + 1] * float(img_height)
                pts_hand[str(i)] = {
                    "x": x + xyxy[0],
                    "y": y + xyxy[1],
                }
            # Gesture classification for this hand.
            if get_gesture_garbage(pts_hand) == 1:
                pose[index] = pose[index] + 1
                if index == 0:
                    one_flag = 1
                elif index == 1:
                    other_flag = 1
            else:
                pose[index] = 0
                if index == 0:
                    one_flag = 0
                elif index == 1:
                    other_flag = 0

            # Gesture held for more than 15 consecutive frames: fingertip (keypoint 8) is selected.
            if pose[index] > 15:
                cv2.circle(im0, (int(pts_hand['8']['x']), int(pts_hand['8']['y'])), 20, (0, 0, 255), 2)
                if index == 0:
                    one_finger[0] = pts_hand['8']['x']
                    one_finger[1] = pts_hand['8']['y']
                if index == 1:
                    other_finger[0] = pts_hand['8']['x']
                    other_finger[1] = pts_hand['8']['y']
            elif pose[index] == 15:
                os.system('start pythonw ./utils/voice.pyw "选中"')

            if one_flag == 1 and other_flag == 1:
                # Rectangle spanned by the two selected fingertips.
                cv2.rectangle(im0, (int(other_finger[0]), int(other_finger[1])),
                              (int(one_finger[0]), int(one_finger[1])), color=(15, 255, 95), thickness=3)
                if index == 1 and pose[1] == 30:
                    # FIX: crop rows by y and columns by x; the original mixed
                    # x/y coordinates in the row slice.
                    y0 = int(min(other_finger[1], one_finger[1]))
                    y1 = int(max(other_finger[1], one_finger[1]))
                    x0 = int(min(other_finger[0], one_finger[0]))
                    x1 = int(max(other_finger[0], one_finger[0]))
                    predict_img = im0[y0:y1, x0:x1]
                    cv2.imwrite('./garbage_img/garbage.jpg', predict_img)
                    answer = predict_garbage()
                    # NOTE(review): shell command built by string concatenation;
                    # safe only while `answer` is a trusted classifier label.
                    cmd = 'start pythonw ./utils/voice.pyw ' + "检测结果" + answer
                    os.system(cmd)
            draw_bd_handpose(im0, pts_hand, 0, 0)  # draw keypoint skeleton
            # Draw the individual keypoints.
            for i in range(int(output.shape[0] / 2)):
                x = output[i * 2 + 0] * float(img_width) + xyxy[0]
                y = output[i * 2 + 1] * float(img_height) + xyxy[1]
                cv2.circle(im0, (int(x), int(y)), 3, (255, 50, 60), -1)
                cv2.circle(im0, (int(x), int(y)), 1, (255, 150, 180), -1)

        s2 = time.time()
        str_fps = ("{:.2f} Fps".format(1. / (s2 - t + 0.00001)))
        cv2.putText(im0, str_fps, (5, im0.shape[0] - 3), cv2.FONT_HERSHEY_DUPLEX, 0.9, (255, 0, 255), 4)

        # Display and record the annotated frame.
        cv2.namedWindow('image', 0)
        cv2.imshow("image", im0)
        key = cv2.waitKey(1)
        if video_writer is None:
            # Lazily create the writer once the frame size is known.
            fourcc = cv2.VideoWriter_fourcc(*"mp4v")
            video_writer = cv2.VideoWriter(save_video_path, fourcc, fps=25, frameSize=(im0.shape[1], im0.shape[0]))
        video_writer.write(im0)
        if key == 27:  # ESC quits
            break

    # FIX: release the capture, and only release the writer if it was created
    # (the original crashed with AttributeError when no frame was ever read).
    video_capture.release()
    cv2.destroyAllWindows()
    if video_writer is not None:
        video_writer.release()


if __name__ == '__main__':
    # --- runtime configuration -------------------------------------------
    voc_config = 'cfg/hand.data'        # dataset / class-names config
    model_path = 'weights/hand_416.pt'  # detector checkpoint
    model_cfg = 'yolo'                  # 'yolo' or 'yolo-tiny' structure
    video_path = "./video/2.mp4"        # test video source
    img_size = 416                      # network input size
    conf_thres = 0.5                    # detection confidence threshold
    nms_thres = 0.6                     # NMS IoU threshold

    # Run inference without gradient tracking.
    with torch.no_grad():
        detect(
            model_path=model_path,
            cfg=model_cfg,
            data_cfg=voc_config,
            img_size=img_size,
            conf_thres=conf_thres,
            nms_thres=nms_thres,
            video_path=video_path,
        )
