from argparse import ArgumentParser
import json
import os

import cv2
import numpy as np

from modules.input_reader import VideoReader, ImageReader
from modules.draw import Plotter3d, draw_poses
from modules.parse_poses import parse_poses

# Demo configuration; stands in for parsed command-line arguments.
args = dict(
    model='../human-pose-estimation-3d.pth',      # path to the network checkpoint
    device='GPU',                                 # inference device name
    images=['../1.jpg', '../2.jpg', '../3.jpg'],  # input still images
    height_size=256,                              # network input height in pixels
    fx=-1,                                        # camera focal length; negative means unknown
)



def rotate_poses(poses_3d, R, t):
    """Transform each flattened 3D pose from camera to world coordinates.

    Every row of `poses_3d` is a flat (num_joints * 4) vector of
    (x, y, z, score) tuples.  The x/y/z components are mapped through the
    inverse of `R` after subtracting `t`; scores are left untouched.
    The input array is modified in place and also returned.
    """
    rotation_inv = np.linalg.inv(R)
    for idx, flat_pose in enumerate(poses_3d):
        joints = flat_pose.reshape((-1, 4)).T          # 4 x num_joints
        joints[0:3, :] = np.dot(rotation_inv, joints[0:3, :] - t)
        poses_3d[idx] = joints.T.reshape(-1)
    return poses_3d


def parse_gesture_result():
    """Placeholder for gesture-recognition post-processing; not implemented."""


if __name__ == '__main__':
    if args.get('video') is None and args.get('images') is None:
        raise ValueError('video or image has to be provided one')

    stride = 8  # network output stride; input width must be a multiple of it
    # Imported lazily so torch is only loaded when the demo actually runs.
    from modules.inference_engine_pytorch import InferenceEnginePyTorch
    net = InferenceEnginePyTorch(args.get('model'), args.get('device'))

    # Camera extrinsics (rotation R, translation t) for the camera-to-world
    # transform applied by rotate_poses().
    file_path = os.path.join('../data', 'extrinsics.json')
    with open(file_path, 'r') as f:
        extrinsics = json.load(f)
    R = np.array(extrinsics['R'], dtype=np.float32)
    t = np.array(extrinsics['t'], dtype=np.float32)

    frame_provider = ImageReader(args.get('images'))
    is_video = False
    if args.get('video') is not None:
        frame_provider = VideoReader(args.get('video'))
        is_video = True
    base_height = args.get('height_size')
    fx = args.get('fx')

    # One entry per frame that produced at least one detection: the first
    # person's 3D pose as a (19 joints x XYZ) array.
    result_set = []
    for frame in frame_provider:
        if frame is None:
            break
        # Scale the frame so its height matches the network input height,
        # then crop the width down to a multiple of the stride.
        input_scale = base_height / frame.shape[0]
        scaled_img = cv2.resize(frame, dsize=None, fx=input_scale, fy=input_scale)
        scaled_img = scaled_img[:, 0:scaled_img.shape[1] - (scaled_img.shape[1] % stride)]  # better to pad, but cut out for demo
        if fx is None or fx < 0:  # Focal length is unknown
            fx = np.float32(0.8 * frame.shape[1])

        # Run inference and decode 3D/2D poses from the network output.
        inference_result = net.infer(scaled_img)
        poses_3d, poses_2d = parse_poses(inference_result, input_scale, stride, fx, is_video)
        if len(poses_3d):
            poses_3d = rotate_poses(poses_3d, R, t)
            poses_3d_copy = poses_3d.copy()
            # Re-map axes for the plotting convention: (x, y, z) -> (-z, x, -y).
            x = poses_3d_copy[:, 0::4]
            y = poses_3d_copy[:, 1::4]
            z = poses_3d_copy[:, 2::4]
            poses_3d[:, 0::4], poses_3d[:, 1::4], poses_3d[:, 2::4] = -z, x, -y

            # Drop the per-joint score, keeping XYZ of the 19 joints, and
            # record only the first detected person.
            poses_3d = poses_3d.reshape(poses_3d.shape[0], 19, -1)[:, :, 0:3]
            result_set.append(poses_3d[0])

    # The comparison below needs poses from at least three frames; fail with
    # a clear message instead of an IndexError.
    if len(result_set) < 3:
        raise RuntimeError(
            'need detected poses from at least 3 frames, got %d' % len(result_set))
    a = np.subtract(result_set[0], result_set[1])
    b = np.subtract(result_set[0], result_set[2])

    print(np.around(a, 2))
    print(np.around(b, 2))

