"""
MTM主程序
"""
import os
import cv2
import time
import math
import config
import numpy as np
import tensorflow as tf
import math_utility as mu
import gesture_recognition_utility as gu
from CNM import point_matching


os.environ["CUDA_VISIBLE_DEVICES"] = '-1'   # Disable GPU: force TensorFlow to run on CPU only


# MTM global parameters (all values come from config.py)
RESCALE_RATE = config.RESCALE_RATE      # down-scale factor applied before detection, results are mapped back afterwards
KERNEL_SIZE = config.KERNEL_SIZE
FAST_RADIUS = config.FAST_RADIUS
SET_MAX_LENGTH = config.SET_MAX_LENGTH
MIN_DISTANCE = config.MIN_DISTANCE      # minimum fingertip displacement (pixels) treated as real motion
# CNN model hyper-parameters
REVISE_WIDTH = config.REVISE_WIDTH      # working frame width  — presumably 480, per the "480*640" note in main; confirm in config.py
REVISE_HEIGHT = config.REVISE_HEIGHT    # working frame height — presumably 640
MODEL_WIDTH = config.MODEL_WIDTH
MODEL_HEIGHT = config.MODEL_HEIGHT
# LSTM model hyper-parameters
SLOT_NUMBERS = config.SLOT_NUMBERS      # number of motion slots used by point matching
VIDEO_CAPTURE = cv2.VideoCapture('./Test/X.mp4')   # input video stream
OUTPUT_VIDEO = cv2.VideoWriter('./Video/output.mp4', cv2.VideoWriter_fourcc(*'XVID'), 30.0, (960, 640), True)   # demo output video; NOTE(review): 'XVID' fourcc inside an .mp4 container is an unusual pairing ('mp4v' is conventional) — verify the writer actually opens
CNN_MODEL_DIR = './CNN/Model/model.meta'
LSTM_MODEL_DIR = './LSTM/Models/64(0.001)/Model/model.ckpt'


# Show several views side by side in a single window and record them.
def show_window(video_frame, fps=None):
    """Render one demo frame (heat map | annotated frame) and append it to the output video.

    The left half is the single-channel fingertip heat map replicated into
    three channels; the right half is ``video_frame`` with the detection
    overlays and an FPS readout.

    Args:
        video_frame: BGR frame; modified in place by the FPS text overlay.
        fps: frames-per-second value to draw. When ``None`` (the default,
            kept for backward compatibility with existing callers) it is
            derived from the module-level ``time_sta``/``time_end``
            timestamps written by the main loop.
    """
    if fps is None:
        # Backward-compatible fallback: read the globals set by the main
        # loop's timing section.
        fps = 1 / (time_end - time_sta)
    cv2.putText(video_frame, 'FPS: ' + str(round(fps, 2)),
                (int(REVISE_WIDTH / 20), int(REVISE_HEIGHT / 20)), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
    # Replicate the single-channel heat map into three channels so it can be
    # stacked next to the BGR frame (replaces three per-channel assignments).
    triple_heatmap = cv2.merge([heat_map, heat_map, heat_map])
    multi_img = np.hstack([triple_heatmap, video_frame])
    cv2.namedWindow('show', 0)   # 0 == cv2.WINDOW_NORMAL (resizable)
    cv2.resizeWindow('show', 960, 640)
    cv2.imshow("show", multi_img)
    OUTPUT_VIDEO.write(multi_img)


if __name__ == '__main__':
    # Open a TF1 session for the whole run.
    with tf.Session() as sess:
        # Restore the trained CNN fingertip classifier.
        saver_cnn = tf.train.import_meta_graph(CNN_MODEL_DIR)
        saver_cnn.restore(sess, tf.train.latest_checkpoint('./CNN/Model/'))
        # Look up the tensors we need once — the graph does not change per frame.
        graph = tf.get_default_graph()
        x = graph.get_tensor_by_name('input/x:0')
        logits = graph.get_tensor_by_name('logits_eval:0')
        # Per-frame state.
        last_points = []        # fingertips detected in the previous frame
        frame_count = 0
        time_all_sum = 0        # accumulated wall time of the whole pipeline
        time_network_sum = 0    # accumulated wall time of CNN inference only
        ang_list = []           # history of per-slot motion angles (normalised by 2*pi)
        dis_list = []           # history of per-slot motion distances (pixels)
        while True:
            ret, frame = VIDEO_CAPTURE.read()
            # Stop at end of stream.
            if frame is None:
                break
            # Force portrait orientation: rotate landscape frames by 90 degrees.
            if frame.shape[1] > frame.shape[0]:
                frame = cv2.transpose(frame)
                frame = cv2.flip(frame, 1)
            # <-------------------- timing start -------------------->
            time_sta = time.time()
            frame = cv2.resize(frame, (REVISE_WIDTH, REVISE_HEIGHT))    # normalise frame to 480*640
            # NOTE(review): `copy` is an alias, not a copy — the drawing calls
            # below and gu.cut_image share this buffer with `frame`'s old value.
            copy = frame
            # Detect on a down-scaled image, then map results back to the original scale.
            frame = cv2.resize(frame, (int(REVISE_WIDTH / RESCALE_RATE), int(REVISE_HEIGHT / RESCALE_RATE)))
            heat_map, point_set = gu.get_heatmap(frame)    # heat map + cluster centres
            heat_map = cv2.resize(heat_map, (REVISE_WIDTH, REVISE_HEIGHT))
            input_list, out_points = gu.cut_image(copy, point_set)     # crop candidate patches
            # Classify all candidate patches in one batch.
            feed_dict = {x: input_list}
            time_network_sta = time.time()
            classification_result = sess.run(logits, feed_dict)
            # Record the end time before the empty check so inference timing is
            # captured consistently (the original `continue` skipped it).
            time_network_end = time.time()
            if len(classification_result) == 0:
                continue    # no candidates this frame
            time_network_sum += time_network_end - time_network_sta
            # Row-wise argmax on the fetched NumPy array. Equivalent to the
            # original tf.argmax(...).eval(), but does not add new ops to the
            # graph on every frame (which grew the graph without bound).
            output = np.argmax(classification_result, 1)
            # Keep positives only: class 0 is negative, class 1 is a fingertip.
            fingertips = [out_points[i] for i in range(len(out_points)) if int(output[i]) == 1]
            for tip in fingertips:
                # Cast to int: RESCALE_RATE may be fractional and newer OpenCV
                # versions reject float coordinates.
                cv2.circle(copy, (int(tip[0] * RESCALE_RATE), int(tip[1] * RESCALE_RATE)), 3, (0, 0, 255), cv2.FILLED)
            # Match current fingertips against the previous frame and draw the
            # motion segments.
            if frame_count > 0:
                match = point_matching(fingertips, last_points, slot=SLOT_NUMBERS)
                ang_set = [-1] * SLOT_NUMBERS   # -1 marks an empty slot
                dis_set = [-1] * SLOT_NUMBERS
                for i in range(len(match)):
                    point_sta = fingertips[match[i][0]]
                    point_end = last_points[match[i][1]]
                    distance = mu.get_distance(point_sta, point_end)
                    angle = mu.get_angle(point_sta, point_end)
                    if distance > MIN_DISTANCE and angle != -1:
                        ang_set[match[i][0]] = round(angle / (2 * math.pi), 2)
                        dis_set[match[i][0]] = round(distance, 2)
                        # Record only when at least two slots are populated.
                        if ang_set.count(-1) < SLOT_NUMBERS - 1:
                            print(ang_set)
                            # Append snapshots: the original appended the live
                            # lists, so later in-loop mutations retroactively
                            # rewrote the stored history.
                            ang_list.append(list(ang_set))
                            dis_list.append(list(dis_set))
                        p1 = (int(point_sta[0] * RESCALE_RATE), int(point_sta[1] * RESCALE_RATE))
                        p2 = (int(point_end[0] * RESCALE_RATE), int(point_end[1] * RESCALE_RATE))
                        cv2.line(copy, p1, p2, (0, 255, 255), 2)
            last_points = fingertips
            time_end = time.time()
            time_all_sum += time_end - time_sta
            # <-------------------- timing end -------------------->

            show_window(copy)
            # Pressing 'n' skips the bookkeeping for this frame (it is not
            # counted towards the running averages).
            if cv2.waitKey(1) & 0xff == ord('n'):
                continue
            frame_count += 1
            print('CORE: %.4f\tALL: %.4f' % (time_network_sum / frame_count, time_all_sum / frame_count))
    VIDEO_CAPTURE.release()
    OUTPUT_VIDEO.release()
    cv2.destroyAllWindows()
