"""
该文件为算法的执行文件
This is the main file of the algorithm
"""
import os
import cv2
import math
import time
import tensorflow as tf
import numpy as np
import data_manage as dm
import hand_extraction as he
import GRU
import NMST
import config
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # suppress TensorFlow C++ info/warning logs


# File paths
MODEL_PATH = './Models/32768(2e-05)/Model/model.ckpt'   # path of the saved model checkpoint
FILE_ROUTE = './Test/'
VIDEO_NAME = '1345.mp4'
VIDEO_CAPTURE = cv2.VideoCapture(FILE_ROUTE + VIDEO_NAME)   # open the input video stream
# Demo output video writer.
# NOTE(review): XVID fourcc inside an .mp4 container may fail silently on some
# OpenCV backends — consider 'mp4v' or an .avi container; confirm output plays.
OUTPUT_VIDEO = cv2.VideoWriter('./Video/output.mp4', cv2.VideoWriter_fourcc(*'XVID'), 30.0, (540, 480), True)   # demo output video
# Network hyper-parameters
SEQ_MAX_LEN = config.SEQ_MAX_LEN    # maximum sequence length
N_HIDDEN_UNITS = config.N_HIDDEN_UNITS  # number of hidden units
BATCH_SIZE = config.BATCH_SIZE  # batch size
CLASSES = config.CLASSES    # number of classes
# Model hyper-parameters
REVISE_WIDTH = config.REVISE_WIDTH  # frame width after downscaling
REVISE_HEIGHT = config.REVISE_HEIGHT    # frame height after downscaling
MIN_SPACING = config.MIN_SPACING    # trajectories with fewer points than this are excluded from inference
MIN_ANGLE = config.MIN_ANGLE    # angle threshold for accepting a trajectory key point
MIN_DISTANCE = config.MIN_DISTANCE  # minimum key-point distance threshold (scales with the downscaled resolution)
MAX_DISTANCE = config.MAX_DISTANCE  # maximum key-point distance threshold (scales with the downscaled resolution)
IGNORE_PROP = config.IGNORE_PROP    # drop the previous trajectory when the current/previous length ratio exceeds this
AUTO_RUNNING = True     # True: autoplay; False: step frame-by-frame on keypress


# Numeric label encoding
def one_hot(label):
    """Return a one-hot matrix for an integer label array.

    The encoding depth is inferred as ``max(label) + 1``, so the labels
    passed in must include the highest class index.
    """
    count = len(label)
    flattened = label.reshape(count)
    depth = np.max(flattened) + 1   # infer class count from the data itself
    return np.eye(depth)[flattened.astype(np.int32)]


if __name__ == '__main__':

    # Placeholders for the network inputs
    with tf.name_scope('input'):
        X = tf.placeholder('float', [None, SEQ_MAX_LEN, 2], name='x_input')
        Y = tf.placeholder('float', [None, CLASSES], name='y_input')
        true_length = tf.placeholder(tf.int32, [None], name='length_input')

    # Weight and bias variables
    weights = {
        'in': tf.Variable(tf.truncated_normal([2, N_HIDDEN_UNITS]), name='w_in'),
        'out': tf.Variable(tf.truncated_normal([N_HIDDEN_UNITS, CLASSES]), name='w_out')
    }
    biases = {
        'in': tf.Variable(tf.constant(0.1, shape=[N_HIDDEN_UNITS, ]), name='b_in'),
        'out': tf.Variable(tf.constant(0.1, shape=[CLASSES, ]), name='b_out')
    }

    # Define the recurrent network's inputs and outputs
    logits = GRU.GRU(X, weights, biases, true_length)   # raw output vector (logits)
    pre_ord = tf.argmax(logits, 1, name='output')   # index of the largest logit (predicted class)
    prediction = tf.nn.softmax(logits)  # output vector after softmax
    pre_num = tf.reduce_max(prediction, 1, name='max')   # largest probability value
    saver = tf.train.Saver()    # persistence helper used to restore the model

    # Main body of the algorithm
    long_count = -1  # consecutive hits of the split-point class; the more hits, the more likely a split
    ang_list = []   # accumulated angle features
    dis_list = []   # accumulated distance features
    cen_point = []  # centroid of hand
    key_point = []  # key point of trajectory
    cut_point = []  # split point between two trajectories
    result_history = []    # history of accepted predictions
    result = ['F', 0, 0]    # current prediction: [label, field 6, field 7] of an NMST.merge entry — field semantics defined in NMST
    frame_count = 0
    with tf.Session() as sess:
        temp_angle = []  # running history of inter-centroid angles
        saver.restore(sess, MODEL_PATH)     # restore the ckpt model
        start_time = time.time()
        while True:
            ret, frame = VIDEO_CAPTURE.read()
            if frame is None:   # stop when the video stream has no more frames
                break
            if frame.shape[1] > frame.shape[0]:     # force portrait orientation
                frame = cv2.transpose(frame)
                frame = cv2.flip(frame, 1)
            frame = cv2.resize(frame, (REVISE_WIDTH, REVISE_HEIGHT))

            # Create an all-white canvas for drawing
            show_img = frame.copy()
            show_img[:, :] = 255

            # Locate the hand centroid
            center = he.get_centroid(frame)
            cv2.circle(show_img, center, 4, (255, 0, 0), cv2.FILLED)

            # Angle and distance between key points
            if len(cen_point) == 0:     # record the initial point
                key_point.append(center)
            elif len(cen_point) == 1:   # record the initial angle
                temp_angle.append(he.get_angle(cen_point[-1], center))
            else:   # decide whether the current centroid is a key point
                condition_ang = math.fabs(he.get_angle(cen_point[-1], center) - temp_angle[-1]) > MIN_ANGLE
                condition_dis_max = he.get_distance(center, key_point[-1]) > MAX_DISTANCE
                condition_dis_min = he.get_distance(center, key_point[-1]) > MIN_DISTANCE
                if (condition_ang or condition_dis_max) and condition_dis_min:
                    print('FRAME %s START' % frame_count)
                    temp_angle.append(he.get_angle(cen_point[-1], center))
                    key_point.append(center)
                    # Angle is normalized by 2*pi so it lies in a unit range
                    cur_angle = round(he.get_angle(key_point[-2], key_point[-1]) / (2 * math.pi), 4)
                    cur_distance = round(he.get_distance(key_point[-2], key_point[-1]), 2)
                    ang_list.append(cur_angle)
                    dis_list.append(cur_distance)

                    # Non-maximum suppression over the trajectory
                    if len(ang_list) > MIN_SPACING:
                        subset_index = NMST.subset(len(ang_list), MIN_SPACING, BATCH_SIZE)  # all subset slices of the trajectory
                        dic_data = dm.read_data_list(ang_list, dis_list, subset_index, len(subset_index))   # rebuild network inputs from the slices
                        data_x = dic_data['x']
                        data_len = dic_data['length']
                        data_sta = dic_data['start']
                        data_end = dic_data['end']
                        times = math.ceil(len(data_x) / BATCH_SIZE)
                        # Predict each subset, one batch at a time
                        number = 0
                        output_pre_num = []
                        output_pre_ord = []
                        output_pre_sta = []
                        output_pre_end = []
                        for i in range(0, times):
                            input_pre_num, input_pre_ord = sess.run([pre_num, pre_ord], feed_dict={
                                X: np.array(data_x)[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
                                true_length: np.array(data_len)[i * BATCH_SIZE: (i + 1) * BATCH_SIZE]})
                            for j in range(len(input_pre_num)):
                                output_pre_ord.append(input_pre_ord[j])
                                output_pre_num.append(input_pre_num[j])
                                output_pre_sta.append(data_sta[number])
                                output_pre_end.append(data_end[number])
                                number += 1
                        # Walk over every prediction and collect candidates
                        cluster = []
                        for i in range(len(output_pre_ord)):
                            if output_pre_sta[i] == 0 and output_pre_end[i] == 0:
                                continue
                            # Total length of the subset (sum of the distance channel)
                            sum_len = 0
                            for j in range(SEQ_MAX_LEN):
                                sum_len += data_x[i][j][1]
                            cluster.append([output_pre_ord[i], output_pre_num[i],
                                            output_pre_sta[i][0], output_pre_end[i][0], sum_len])
                            print('%s\t(%s  %s)\t\t%.4f\t\t%.4f' %
                                  (output_pre_ord[i], output_pre_sta[i][0], output_pre_end[i][0],
                                   output_pre_num[i], output_pre_num[i] * sum_len))
                        if len(cluster) > 0:
                            merged = NMST.merge(cluster)    # aggregate the candidate trajectories
                            # Walk the merged classes and sort them into three trajectory kinds:
                            # class 10 -> "short", class 11 -> "long" (split candidate), others -> "valid"
                            max_val_pro = 0
                            max_srt_pro = 0
                            max_val_dis = 0
                            max_srt_dis = 0
                            merged_srt = []
                            merged_lon = []
                            merged_val = []
                            for i in range(len(merged)):
                                # Score = merged[i][6] * merged[i][7]; long trajectories get a
                                # boost that grows with how often the split class has appeared
                                score = merged[i][6] * merged[i][7]
                                if merged[i][0] == 11:
                                    score *= (0.8 + 0.1 * (long_count + 1))
                                print('<----> %s (%s %s %s %s)\t\t%.4f' %
                                      (merged[i][0], merged[i][1], merged[i][2], merged[i][3], merged[i][4], score))
                                if merged[i][0] == 10:
                                    merged_srt.append(merged[i])
                                elif merged[i][0] == 11:
                                    merged_lon.append(merged[i])
                                else:
                                    merged_val.append(merged[i])
                            # Find the best-scoring / longest short trajectory
                            if len(merged_srt) > 0:
                                for i in range(len(merged_srt)):
                                    cur_score = merged_srt[i][6] * merged_srt[i][7]
                                    max_score = merged_srt[max_srt_pro][6] * merged_srt[max_srt_pro][7]
                                    if merged_srt[i][8] > merged_srt[max_srt_dis][8]:
                                        max_srt_dis = i
                                    if cur_score > max_score:
                                        max_srt_pro = i
                            # Find the best-scoring / longest valid trajectory
                            if len(merged_val) > 0:
                                for i in range(len(merged_val)):
                                    cur_score = merged_val[i][6] * merged_val[i][7]
                                    max_score = merged_val[max_val_pro][6] * merged_val[max_val_pro][7]
                                    if merged_val[i][8] > merged_val[max_val_dis][8]:
                                        max_val_dis = i
                                    if cur_score > max_score:
                                        max_val_pro = i
                            # Decide whether this is a trajectory split point
                            j = 0
                            if len(merged_lon) > 0:
                                long_count += 1
                            while j < len(merged_lon):
                                # Compute each group's score
                                score_lon = merged_lon[j][6] * merged_lon[j][7] * (0.8 + 0.1 * long_count)
                                if len(merged_val) > 0:
                                    score_val = merged_val[max_val_pro][6] * merged_val[max_val_pro][7]
                                if len(merged_srt) > 0:
                                    # NOTE(review): this mixes max_srt_pro ([6]) with max_srt_dis ([7]);
                                    # every sibling score uses a single index — confirm this is intentional
                                    score_srt = merged_srt[max_srt_pro][6] * merged_srt[max_srt_dis][7]
                                # Treat as a split point only when the long-trajectory score beats every other class
                                if len(merged_srt) > 0 and len(merged_val) > 0:
                                    if score_lon > score_val and score_lon > score_srt:
                                        break
                                elif len(merged_srt) <= 0 < len(merged_val):
                                    if score_lon > score_val:
                                        break
                                elif len(merged_srt) > 0 >= len(merged_val):
                                    if score_lon > score_srt:
                                        break
                                j += 1
                            if j == len(merged_lon):
                                # Not a split point: decide whether the trajectory is invalid
                                j = 0
                                while j < len(merged_srt):
                                    if len(merged_val) <= 0:
                                        break
                                    # A short trajectory dominates only when it wins on score,
                                    # length (field 8), AND field 7 simultaneously
                                    condition_a = merged_srt[j][6] * merged_srt[j][7] > merged_val[max_val_pro][6] * merged_val[max_val_pro][7]
                                    condition_b = merged_srt[j][8] > merged_val[max_val_pro][8]
                                    condition_c = merged_srt[j][7] > merged_val[max_val_pro][7]
                                    if condition_a and condition_b and condition_c:
                                        break
                                    j += 1
                                if j == len(merged_srt):
                                    result = [merged_val[max_val_pro][0],
                                              merged_val[max_val_pro][6],
                                              merged_val[max_val_pro][7]]
                                    # Discard the previous accepted prediction when the current
                                    # trajectory outgrows it by the configured proportion
                                    if len(result_history) > 0:
                                        if result[2] / result_history[-1][2] > result[1] * IGNORE_PROP:
                                            result_history.pop(-1)
                            else:
                                # Split the trajectory: commit the current result and reset accumulators
                                result_history.append(result)
                                ang_list.clear()
                                dis_list.clear()
                                long_count = 0
                                cut_point.append(key_point[-1])
                                result = ['', 0, 0]
            cen_point.append(center)

            # Compute FPS
            frame_count += 1
            fps = int(frame_count / (time.time() - start_time))

            # Draw trajectory and key points
            if len(key_point) > 80:    # only show the most recent points
                for i in range(len(key_point) - 80, len(key_point)):
                    if i > 0:
                        cv2.line(show_img, key_point[i], key_point[i - 1], (0, 0, 0), 2)
                    cv2.circle(show_img, key_point[i], 3, (0, 0, 255), cv2.FILLED)  # key point
            else:
                for i in range(len(key_point)):
                    if i > 0:
                        cv2.line(show_img, key_point[i], key_point[i - 1], (0, 0, 0), 2)
                    cv2.circle(show_img, key_point[i], 3, (0, 0, 255), cv2.FILLED)  # key point
            for i in range(len(cut_point)):
                cv2.circle(show_img, cut_point[i], 5, (255, 0, 255), cv2.FILLED)    # split point
            # Overlay the recognized label string and FPS
            output = ''
            for i in range(len(result_history)):
                output += str(result_history[i][0])
            cv2.putText(show_img, 'ANS:' + output + str(result[0]) + '   FPS: ' + str(fps),
                        (int(REVISE_WIDTH / 20), int(REVISE_HEIGHT / 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 0), 1)

            # Show the canvas and the raw frame side by side in one window
            multi_img = np.hstack([show_img, frame])
            cv2.namedWindow('show', 0)
            cv2.resizeWindow('show', 540, 480)
            cv2.imshow("show", multi_img)
            OUTPUT_VIDEO.write(multi_img)

            if AUTO_RUNNING:
                if cv2.waitKey(1) & 0xff == ord('q'):
                    raise KeyboardInterrupt('User interruption!')
            else:
                # Manual mode: block until 'n' is pressed to advance one frame
                if cv2.waitKey(100000) & 0xff == ord('n'):
                    continue
        last_time = time.time()
        print('FPS: %d' % (frame_count / (last_time - start_time)))

    VIDEO_CAPTURE.release()
    OUTPUT_VIDEO.release()
    cv2.destroyAllWindows()
