import sys
import cv2
from rtmlib import Body, draw_skeleton
import numpy as np
import os
import json
from tqdm import *

from utils import *

import pdb


"""
本程序实现以下功能：
1. 用rtmpose做2D人体姿态推断
2. 保存人体姿态推断数据至 datasets/emohugo_pose
3. 保存人体姿态推断的可视化结果至 datasets/emohugo_pose_visual
"""


"""
推断2d人体姿态
输入模态：视频
输出模态：2d人体姿态json文件和可视化视频
"""
def inference_2d_and_save(video_path, inferencer, reault_data_path, result_vis_path=None, silent=False):
    """Run per-frame 2D human pose inference on a video and save the results.

    Parameters
    ----------
    video_path : str
        Path to the input video file (must exist).
    inferencer : callable
        Pose model; called as ``inferencer(frame)`` and returning
        ``(keypoints, scores)`` of shapes ``(num_people, 17, 2)`` and
        ``(num_people, 17)`` (observed in a pdb session on rtmlib's Body).
    reault_data_path : str
        Output path for the JSON pose file.  NOTE(review): "reault" is a
        historical typo; the name is kept so keyword callers keep working.
    result_vis_path : str, optional
        If given, an XVID-encoded skeleton-overlay video is written here.
    silent : bool
        Suppress progress bars and status prints.

    Output JSON: a list of ``{"frame_idx": int, "keypoints": [[x, y, score] * 17]}``
    entries; only the first detected person of each frame is kept.
    """
    assert os.path.exists(video_path)
    ensure_dir(os.path.abspath(os.path.dirname(reault_data_path)))

    cap = cv2.VideoCapture(video_path)
    try:
        frame_num = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_idx = 0
        result_data = []
        # Collected visualisation frames; None means "no visual export requested".
        result_vis = [] if result_vis_path is not None else None

        if not silent:
            print("inferencing 2d")
        for _ in tqdm(range(frame_num), disable=silent):  # frame-by-frame inference
            success, frame = cap.read()
            frame_idx += 1

            if not success:
                print("out of frames")
                break
            keypoints, scores = inferencer(frame)

            if keypoints.shape[0] > 0:
                # Force-select the first detected person.
                result_keypoints_score = np.concatenate(
                    (keypoints[0].reshape(17, 2), scores[0].reshape(17, 1)), axis=1)
                result_data.append({
                    "frame_idx": frame_idx,
                    "keypoints": result_keypoints_score.tolist(),
                })

                if result_vis is not None:
                    img_show = draw_skeleton(frame.copy(),
                                             keypoints,
                                             scores,
                                             openpose_skeleton=False,
                                             kpt_thr=0.3,
                                             line_width=2)
                    result_vis.append(img_show)
            else:
                # The first few frames are usually blank: restart the counter so
                # the first frame with a detection gets frame_idx == 1.
                frame_idx = 0

        with open(reault_data_path, "w") as f:
            json.dump(result_data, f, indent=4)

        if result_vis_path is not None:
            if not silent:
                print("exporting 2d visual")
            ensure_dir(os.path.abspath(os.path.dirname(result_vis_path)))
            fps = cap.get(cv2.CAP_PROP_FPS)
            frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            videowriter = cv2.VideoWriter(result_vis_path, fourcc, fps,
                                          (frame_width, frame_height), True)
            try:
                for img_show in tqdm(result_vis, disable=silent):
                    videowriter.write(img_show)
            finally:
                videowriter.release()  # guarantee the container is finalised
    finally:
        cap.release()  # was leaked in the original

if __name__ == "__main__":
    inferencer_2d = Body(
        pose='rtmo', # 使用 rtmpose 和 yolo detector 会报错
        to_openpose=False,
        mode='performance',  # balanced, performance, lightweight
        backend='onnxruntime',
        device='cuda')  # onnxruntime 最高支持cuda11.8
    
    video_dataset_root = os.path.join(get_project_root(), "datasets/emohugo_video")
    
    video_path = os.path.join(get_project_root() , "datasets/emohugo_video/anger/000018-S002-08.avi")
    print("inferencing 2d: ", video_path)

    reault_data_path = emohugo_path_shift(video_path, "video", "pose_2d")
    result_vis_path = emohugo_path_shift(video_path, "video", "pose_2d_visual")
    print("saving to: \n", reault_data_path, "\n", result_vis_path)
    inference_2d_and_save(video_path, inferencer_2d, reault_data_path, result_vis_path, silent=True)
    # 导出视频只比不导出视频慢了2秒，绝大部分时间都花在推断上

