#!/usr/bin/env python
# _*_ coding: utf-8 _*_
# @Time : 2021/4/3 12:35 上午
# @Author : sxt
# @Version：V 1.0
# @File : gesture_comparing_tool.py
# @desc : 姿态识别工具

# Keypoint names are defined in kpt_names below, after the imports.
import json
import os

import cv2
import numpy as np

from gesture_calculation.gesture_algorithm.modules.inference_engine_pytorch import InferenceEnginePyTorch
from gesture_calculation.gesture_algorithm.modules.input_reader import ImageReader
from gesture_calculation.gesture_algorithm.modules.parse_poses import parse_poses
from threedGestureProject.settings import BASE_DIR

# Root directory of the gesture-recognition algorithm package.
PWD = f'{BASE_DIR}/gesture_calculation/gesture_algorithm'

# Required runtime parameters for the pose-estimation network.
args = {
    "model": f'{PWD}/human-pose-estimation-3d.pth',  # pretrained network weights
    "device": 'CPU',
    "height_size": 256,  # frames are scaled so their height matches this
    "fx": -1  # focal length; negative means "estimate from image width"
}

# Keypoint display names (Chinese), indexed by keypoint id:
# 0 neck, 1 nose, 2 pelvis,
# 3-8 left shoulder/elbow/wrist/hip/knee/ankle,
# 9-14 right shoulder/elbow/wrist/hip/knee/ankle,
# 15 right eye, 16 left eye, 17 right ear, 18 left ear.
kpt_names = ['脖子', '鼻子', '盆骨',
             '左肩', '左肘', '左手腕', '左臀部', '左膝盖', '左脚踝',
             '右肩', '右肘', '右手腕', '右臀部', '右膝盖', '右脚踝',
             '右眼', '左眼',
             '右耳', '左耳']

# Process-wide cache so the heavy network is loaded only once per process.
pth_cache = dict()


def pose3d_centralized(pose_3d: np.ndarray, center: np.ndarray) -> np.ndarray:
    """Centralise 3D keypoints: subtract the centre point from every keypoint.

    :param pose_3d: (N, 3) array of keypoint coordinates
    :param center: (3,) centre-point vector
    :return: new (N, 3) array with the centre point moved to the origin
    """
    # NumPy broadcasting replaces the original per-row Python loop.
    return np.asarray(pose_3d) - np.asarray(center)


def get_max_distance(pose_3d: np.ndarray) -> np.ndarray:
    """Return the maximum coordinate along each of the three axes.

    :param pose_3d: (N, 3) array of keypoint coordinates
    :return: (3,) array [max_x, max_y, max_z]

    NOTE(review): these are maxima of SIGNED coordinates, not absolute
    distances — confirm that is the intended normalisation factor.
    """
    # The ::4 stride is kept from the original flattened (x, y, z, score)
    # layout; on a (N, 3) input it simply selects columns 0, 1 and 2.
    # ndarray.max() replaces the fragile builtin max() over row slices,
    # which compared one-element arrays row by row.
    return np.array([pose_3d[:, 0::4].max(),
                     pose_3d[:, 1::4].max(),
                     pose_3d[:, 2::4].max()])


def get_relative_pose3d(pose_3d: np.ndarray, max_distance: np.ndarray) -> np.ndarray:
    """Scale every keypoint coordinate by the per-axis maximum distance.

    :param pose_3d: (N, 3) array of keypoint coordinates
    :param max_distance: (3,) per-axis maxima, e.g. from get_max_distance()
    :return: new (N, 3) array divided axis-wise by the maxima

    NOTE(review): a zero in max_distance yields inf/nan — confirm upstream
    guarantees non-zero extents.
    """
    # Axis-wise broadcast division replaces the original per-row loop.
    return np.asarray(pose_3d) / np.asarray(max_distance)


def _create_tmp_image(f, image):
    """Stream an uploaded image's chunks into an already-open file.

    :param f: writable binary file object; always closed on return, even if
              a write raises (the original leaked the handle on error)
    :param image: uploaded file exposing ``chunks()`` (e.g. a Django
              UploadedFile — TODO confirm)
    :return: None (the original docstring wrongly claimed it returned paths)
    """
    # ``with f`` guarantees close() via __exit__, unlike the original's
    # unconditional f.close() that was skipped when a write failed.
    with f:
        for chunk in image.chunks():
            f.write(chunk)


def parse_images(files) -> "tuple[ImageReader, str, str]":
    """Persist the two uploaded images under tmp/ and wrap them in a reader.

    :param files: sequence of uploaded files; index 0 is the reference
        ("standard") pose image, index 1 the image to compare against it
    :return: (ImageReader over both temp files, standard path, comparing path)
    """
    # The original annotation ``(ImageReader, str, str)`` was an evaluated
    # tuple instance, not a type hint; the string form is a lazy annotation.
    # NOTE(review): if both uploads share the same filename the two temp
    # paths collide — confirm callers guarantee distinct names.
    standard_image, comparing_image = files[0], files[1]
    standard_path = f'{PWD}/tmp/{standard_image.name}'
    comparing_path = f'{PWD}/tmp/{comparing_image.name}'
    _create_tmp_image(open(standard_path, 'wb'), standard_image)
    _create_tmp_image(open(comparing_path, 'wb'), comparing_image)
    return ImageReader([standard_path, comparing_path]), standard_path, comparing_path


def rotate_poses(poses_3d, r, t):
    """Apply the inverse camera rotation/translation to every pose.

    Each pose is a flat vector of (x, y, z, score) keypoints; only the xyz
    components are transformed, scores pass through untouched.  The input
    array is modified in place and also returned.

    :param poses_3d: (num_poses, num_keypoints * 4) array of flattened poses
    :param r: (3, 3) camera rotation matrix
    :param t: (3, 1) camera translation vector
    :return: poses_3d, transformed in place
    """
    inv_rotation = np.linalg.inv(r)
    for idx, flat_pose in enumerate(poses_3d):
        # View each keypoint as a column: rows are x, y, z, score.
        keypoints = flat_pose.reshape((-1, 4)).transpose()
        keypoints[0:3, :] = np.dot(inv_rotation, keypoints[0:3, :] - t)
        poses_3d[idx] = keypoints.transpose().reshape(-1)
    return poses_3d


def _load_extrinsics():
    """Load the camera rotation matrix R and translation vector t from disk."""
    # The original assigned file_path = None and immediately tested
    # ``if file_path is None`` — dead code, removed.
    file_path = os.path.join(f'{PWD}/data', 'extrinsics.json')
    with open(file_path, 'r') as f:
        extrinsics = json.load(f)
    r = np.array(extrinsics['R'], dtype=np.float32)
    t = np.array(extrinsics['t'], dtype=np.float32)
    return r, t


def _get_net():
    """Return the pose-estimation network, loading and caching it on first use."""
    net = pth_cache.get('net')
    if net is None:
        net = InferenceEnginePyTorch(args.get('model'), args.get('device'))
        pth_cache['net'] = net
    return net


def _extract_poses(frame_provider):
    """Run 3D pose inference on every frame; return one (19, 3) pose per frame."""
    stride = 8
    net = _get_net()
    r, t = _load_extrinsics()
    base_height = args.get('height_size')
    fx = args.get('fx')
    result_set = []
    for frame in frame_provider:
        if frame is None:
            break
        # frame.shape = (height, width, channels)
        input_scale = base_height / frame.shape[0]
        # Resize so the frame height matches the network input height.
        scaled_img = cv2.resize(frame, dsize=None, fx=input_scale, fy=input_scale)
        # better to pad, but cut out for demo: crop width to a multiple of stride
        scaled_img = scaled_img[:, 0:scaled_img.shape[1] - (scaled_img.shape[1] % stride)]
        if fx is None or fx < 0:  # focal length unknown -> estimate from width
            # NOTE(review): fx is estimated from the FIRST frame only and then
            # reused; if the two images differ in width the second image is
            # processed with the first image's focal length — confirm intended.
            fx = np.float32(0.8 * frame.shape[1])

        # Network inference, then decode keypoints.
        inference_result = net.infer(scaled_img)
        poses_3d, poses_2d = parse_poses(inference_result, input_scale, stride, fx)
        if len(poses_3d):
            poses_3d = rotate_poses(poses_3d, r, t)
            poses_3d_copy = poses_3d.copy()
            x = poses_3d_copy[:, 0::4]
            y = poses_3d_copy[:, 1::4]
            z = poses_3d_copy[:, 2::4]
            # Remap camera axes into the visualisation coordinate system.
            poses_3d[:, 0::4], poses_3d[:, 1::4], poses_3d[:, 2::4] = -z, x, -y

            # Keep only xyz of the 19 keypoints of the first detected person.
            poses_3d = poses_3d.reshape(poses_3d.shape[0], 19, -1)[:, :, 0:3]
            result_set.append(poses_3d[0])
    return result_set


def _score_poses(standard_pose: np.ndarray, comparing_pose: np.ndarray) -> dict:
    """Compare two (19, 3) poses; return {keypoint name: L2 error}, worst first."""
    # Nose is keypoint id 1 (see kpt_names).
    key_point = standard_pose[1]
    # NOTE(review): BOTH poses are centralised on the STANDARD pose's nose;
    # centring the comparing pose on its own nose may be the intent — confirm.
    standard_pose = pose3d_centralized(standard_pose, key_point)
    comparing_pose = pose3d_centralized(comparing_pose, key_point)
    # Normalise each pose by its own per-axis maximum extent.
    standard_pose = get_relative_pose3d(standard_pose, get_max_distance(standard_pose))
    comparing_pose = get_relative_pose3d(comparing_pose, get_max_distance(comparing_pose))
    # Per-keypoint Euclidean (L2) error between the normalised poses.
    error_matrix = np.subtract(standard_pose, comparing_pose)
    errors = np.linalg.norm(error_matrix, axis=1, keepdims=False).tolist()
    result_dict = {kpt_names[i]: err for i, err in enumerate(errors)}
    # Largest errors first; ties broken by name (descending).
    return dict(sorted(result_dict.items(), key=lambda kv: (kv[1], kv[0]), reverse=True))


# noinspection SpellCheckingInspection
def gesture_comparing(files) -> dict:
    """Estimate the 3D pose in two images and score their similarity.

    The first file is the reference ("standard") pose, the second the pose to
    compare.  Both poses are centralised and scale-normalised, then the
    per-keypoint L2 error is reported.

    :param files: two uploaded image files (standard first, comparing second)
    :return: {keypoint name: error}, sorted by descending error
    :raises IndexError: if a pose could not be detected in both images
    """
    frame_provider, standard_path, comparing_path = parse_images(files)
    try:
        result_set = _extract_poses(frame_provider)
        return _score_poses(result_set[0], result_set[1])
    finally:
        # Always drop the temp images, even when inference fails
        # (the original left them behind on any error).
        os.remove(standard_path)
        os.remove(comparing_path)
