import sys
sys.path.append("..")
import numpy as np
from utils.VTS_object import get_obj_info
import trimesh
from utils.load_smplx_params import load_multiperson_smplx_params
from os.path import join, isfile, isdir
from smplx import smplx
import os
from PIL import Image
from moviepy.editor import VideoFileClip
import argparse



def get_args_and_cfg():
    """Parse command-line options and assemble the loss-weight config dict.

    Returns:
        (args, cfg): the parsed ``argparse.Namespace`` plus a dict that maps
        each loss-weight name to the value of the option of the same name.
    """
    parser = argparse.ArgumentParser()
    # ---------------- data ----------------
    parser.add_argument('--obj_dataset_dir', type=str, default="/share/datasets/HHO_object_dataset_final")
    parser.add_argument('--dataset_dir', type=str, default="/share/hhodataset/")
    parser.add_argument('--clip_name', type=str, default="20231002")
    # "i/N": this process uses GPU #i out of N GPUs total; each GPU runs
    # exactly one process, and the workload is split evenly across them.
    parser.add_argument('--gpu_info', type=str, default="0/1")

    # ---------------- tuning / loss weights ----------------
    parser.add_argument('--lr', type=float, default=1e-2)
    parser.add_argument('--weight_joint3d_body', type=float, default=1.0)
    parser.add_argument('--weight_joint3d_hand', type=float, default=1.0)
    parser.add_argument('--weight_regularizer_body', type=float, default=1e-3)
    parser.add_argument('--weight_regularizer_hand', type=float, default=1e-4)
    parser.add_argument('--weight_smoothness', type=float, default=1e1)
    parser.add_argument('--weight_regularizer_to_init', type=float, default=1e-2)
    parser.add_argument('--save_suffix', type=str, default="")

    args = parser.parse_args()
    # The config dict simply mirrors the weight_* options one-to-one.
    weight_keys = (
        "weight_joint3d_body",
        "weight_joint3d_hand",
        "weight_regularizer_body",
        "weight_regularizer_hand",
        "weight_smoothness",
        "weight_regularizer_to_init",
    )
    cfg = {key: getattr(args, key) for key in weight_keys}

    return args, cfg



def extract_frame_from_video(filename, frame_number, output_path):
    """Save frame ``frame_number`` of the video ``filename`` as an image.

    The frame index is converted to a timestamp via the clip's fps before
    sampling, and the resulting RGB array is written out with PIL (the image
    format is inferred from ``output_path``'s extension).

    Args:
        filename: path to the input video file.
        frame_number: 0-based frame index to extract.
        output_path: destination image file path.
    """
    video = VideoFileClip(filename)
    try:
        frame = video.get_frame(frame_number / video.fps)
        Image.fromarray(frame).save(output_path)
    finally:
        # Bug fix: the original never closed the clip, leaking the ffmpeg
        # reader subprocess/file handles on every call.
        video.close()


def _frame_smplx_kwargs(params, frame_idx):
    """Slice frame ``frame_idx`` out of a per-sequence SMPL-X parameter dict.

    Each selected tensor is unsqueezed to batch size 1 so the result can be
    splatted directly as keyword arguments into an smplx model call.
    """
    keys = ("betas", "expression", "body_pose", "transl",
            "global_orient", "left_hand_pose", "right_hand_pose")
    return {key: params[key][frame_idx].unsqueeze(0) for key in keys}


def HOI_visualization_raw(obj_dataset_dir, data_dir, save_pth, start_frame, end_frame, device="cuda:0", step=40):
    """Export combined human+object meshes of a sequence as .ply files.

    For every ``step``-th frame in [start_frame, end_frame) the object mesh is
    posed with that frame's aligned rigid transform, concatenated with both
    fitted SMPL-X body meshes, and written to
    ``<save_pth>/smplx_hho_ply/<frame>.ply``.

    Args:
        obj_dataset_dir: root of the object dataset (resolved by get_obj_info).
        data_dir: sequence directory holding ``aligned_objposes.npy`` and the
            ``SMPLX_fitting`` folder.
        save_pth: output root; meshes go into its ``smplx_hho_ply`` subfolder.
        start_frame: first frame index to export.
        end_frame: exclusive end frame; clamped to the number of object poses.
        device: torch device for the SMPL-X model and parameters.
        step: stride between exported frames (was hard-coded to 40).
    """
    _, obj_data_path = get_obj_info(data_dir, obj_dataset_dir)
    print(obj_data_path)
    obj_template_mesh = trimesh.load(obj_data_path)
    origin_obj_pose = np.load(join(data_dir, "aligned_objposes.npy"), allow_pickle=True)
    if len(origin_obj_pose) < end_frame:
        end_frame = len(origin_obj_pose)
        print("Warning: end_frame is larger than the length of object_result, set end_frame to {}".format(str(end_frame)))
    origin_vert, origin_face = obj_template_mesh.vertices, obj_template_mesh.faces

    smplx_model = smplx.create("/share/human_model/models", model_type="smplx", gender="neutral", use_face_contour=False, num_betas=10, num_expression_coeffs=10, ext="npz", use_pca=True, num_pca_comps=12, flat_hand_mean=True).to(device)
    # Bug fix: the mesh topology lives on the *model* (already a numpy array);
    # the forward-pass output only carries vertices/joints, so the original
    # `output.faces.detach()...` access could not work. Hoist it once here.
    body_faces = smplx_model.faces
    multiperson_SMPLX_params = load_multiperson_smplx_params(join(data_dir, "SMPLX_fitting"), start_frame=0, end_frame=end_frame, device=device)
    origin_p1_SMPLX_params = multiperson_SMPLX_params["person1"]
    origin_p2_SMPLX_params = multiperson_SMPLX_params["person2"]

    out_dir = join(save_pth, "smplx_hho_ply")
    os.makedirs(out_dir, exist_ok=True)  # hoisted: was re-checked every frame
    for j in range(start_frame, end_frame, step):
        print(j)
        # Pose the static object template with this frame's rigid transform.
        posed_verts = origin_vert @ origin_obj_pose[j][:3, :3].T + origin_obj_pose[j][:3, 3]
        obj_mesh = trimesh.Trimesh(vertices=posed_verts, faces=origin_face)
        p1_out = smplx_model(return_verts=True, **_frame_smplx_kwargs(origin_p1_SMPLX_params, j))
        p2_out = smplx_model(return_verts=True, **_frame_smplx_kwargs(origin_p2_SMPLX_params, j))

        p1_mesh = trimesh.Trimesh(vertices=p1_out.vertices.detach().cpu().numpy()[0], faces=body_faces)
        p2_mesh = trimesh.Trimesh(vertices=p2_out.vertices.detach().cpu().numpy()[0], faces=body_faces)

        hho_mesh = trimesh.util.concatenate([obj_mesh, p1_mesh, p2_mesh])
        hho_mesh.export(join(out_dir, "{}.ply".format(str(j))), file_type="ply")
    
# /share/hhodataset/VTS/20231002/007/SMPLX_fitting
if __name__ == "__main__":
    data_root_file = "/share/hhodataset/VTS/"
    obj_dataset_dir = "/data3/datasets/HHO_object_dataset_final"
    args, cfg = get_args_and_cfg()
    data_root_file = join(data_root_file, args.clip_name)

    # Collect sequences that still need visualization: a sequence counts as
    # done once its "smplx_hho_ply" output folder exists.
    seq_names = []
    for seq_name in os.listdir(data_root_file):
        seq_dir = join(data_root_file, seq_name)
        if not isdir(seq_dir):
            continue
        if isdir(join(seq_dir, "smplx_hho_ply")):
            continue
        seq_names.append(seq_name)
    seq_names.sort()

    # "--gpu_info i/N": split the sequence list into N even chunks and take
    # chunk i, so each GPU/process handles a disjoint slice of the work.
    current_GPU, GPU_number = (int(part) for part in args.gpu_info.split("/"))
    device = "cuda:0"  # each process is expected to see exactly one GPU
    K = int(np.ceil(len(seq_names) / GPU_number))
    seq_names = seq_names[K * current_GPU : min(K * (current_GPU + 1), len(seq_names))]
    print(seq_names)

    # NOTE: `seq_name` no longer shadows the `dir` builtin, and `device` is
    # now actually forwarded (it was computed but unused before; the value
    # matches the function's default, so behavior is unchanged).
    for seq_name in seq_names:
        data_dir = join(data_root_file, seq_name)
        try:
            print(data_dir)
            HOI_visualization_raw(obj_dataset_dir, data_dir, data_dir, 10, 400, device=device)
        except Exception as e:
            # Best effort: report the failing sequence and keep going so the
            # remaining sequences still get processed.
            print(f"{e} in {data_dir}")
            continue