import argparse
import os
import traceback
from os.path import isdir, isfile, join

import cv2
import numpy as np
import torch
import trimesh

from optimization.bvh2smplx import optimize_pose_sequence, optimize_shape
from utils.HOI_retargeting import simplify_mesh
from utils.VTS_object import get_obj_info
from utils.betas import get_betas_dict
from utils.get_person_name import get_person_name
from utils.load_smplx_params import load_multiperson_smplx_params
from utils.process_timestamps import txt_to_paried_frameids
from utils.time_align import prepare_objpose, time_align
from utils.visualization import render_SMPLX_sequence, save_pcd

def get_args_and_cfg():
    """Parse command-line options and assemble the optimizer weight config.

    Returns:
        (args, cfg): the parsed ``argparse.Namespace`` plus a dict holding
        the loss-weight hyperparameters consumed by the pose optimizer.
    """
    parser = argparse.ArgumentParser()

    # ---- dataset / runtime options ----
    parser.add_argument('--obj_dataset_dir', type=str, default="/share/datasets/HHO_object_dataset_final")
    parser.add_argument('--dataset_dir', type=str, default="/share/hhodataset/")
    parser.add_argument('--clip_name', type=str, default="20231002")
    # Format "<this process's GPU index>/<total GPU count>". Each GPU is
    # expected to run exactly one process; the workload is split evenly.
    parser.add_argument('--gpu_info', type=str, default="0/1")

    # ---- tunable optimization hyperparameters ----
    parser.add_argument('--lr', type=float, default=1e-2)
    parser.add_argument('--weight_joint3d_body', type=float, default=1.0)
    parser.add_argument('--weight_joint3d_hand', type=float, default=1.0)
    parser.add_argument('--weight_regularizer_body', type=float, default=1e-3)
    parser.add_argument('--weight_regularizer_hand', type=float, default=1e-4)
    parser.add_argument('--weight_smoothness', type=float, default=1e1)
    parser.add_argument('--weight_regularizer_to_init', type=float, default=1e-2)
    parser.add_argument('--save_suffix', type=str, default="")

    args = parser.parse_args()

    # Copy only the loss-weight options into the optimizer config dict.
    weight_keys = (
        "weight_joint3d_body",
        "weight_joint3d_hand",
        "weight_regularizer_body",
        "weight_regularizer_hand",
        "weight_smoothness",
        "weight_regularizer_to_init",
    )
    cfg = {key: getattr(args, key) for key in weight_keys}

    return args, cfg


if __name__ == "__main__":
    # Batch driver: for every sequence of the given clip, fit SMPLX pose
    # parameters for both persons from the VTS motion data.
    args, cfg = get_args_and_cfg()
    # test_smplx()

    # One-off shape calibration from measured body-segment lengths
    # (neck-to-ankle, upper arm, forearm, thigh, calf), in cm:
    # 132 32 27 58 37     179 146 71 25 27 60 43
    # human_scale = np.float32([135, 45, 30, 60, 43]) / 100  # type: ignore # new
    # betas = optimize_shape(human_scale, from_bvh=False)
    # print("betas:", betas)
    # exit(0)

    dataset_dir = args.dataset_dir
    clip_name = args.clip_name
    data_dir = join(dataset_dir, "VTS", clip_name)
    person1_name, person2_name = get_person_name(join(dataset_dir, "VTS", clip_name, "person_name.txt"))

    # person2 wears ego (the egocentric camera)
    betas_dict = get_betas_dict()
    person1_beta = betas_dict[person1_name]  # type: ignore
    person2_beta = betas_dict[person2_name]  # type: ignore
    # print(person1_name, person1_beta)
    # print(person2_name, person2_beta)
    # exit(0)

    # Hand-tuned per-end-link translation offsets (15 x 3) fed to the pose
    # optimizer (tag: 20230323_debug2). Presumably in centimeters, one row
    # per end link — TODO confirm against optimize_pose_sequence.
    end_link_trans = np.float32([
        [0, 8.5, 0],
        [4, -8, 20],
        [-4, -8, 17],
        [-4, -8, 20],
        [4, -8, 17],
        [-3, 0, 0],
        [-2.8, 0, 0],
        [-2.8, 0, 0],
        [-2.8, 0, 0],
        [-2.5, 0, 0],
        [3, 0, 0],
        [2.8, 0, 0],
        [2.8, 0, 0],
        [2.8, 0, 0],
        [2.5, 0, 0],
    ])  # type: ignore

    # Collect all sequence directory names under this clip, sorted.
    seq_names = sorted(
        name for name in os.listdir(data_dir) if isdir(join(data_dir, name))
    )

    # gpu_info is "<current>/<total>": keep only this process's even share
    # of the sequences so each GPU/process handles a disjoint chunk.
    current_GPU = int(args.gpu_info.split("/")[0])
    GPU_number = int(args.gpu_info.split("/")[1])
    # device = "cuda:" + str(current_GPU)
    # NOTE(review): device is hard-coded; the per-GPU line above is disabled.
    device = "cuda:0"
    K = int(np.ceil(len(seq_names) / GPU_number))
    seq_names = seq_names[K * current_GPU : K * (current_GPU + 1)]  # slices clamp, no min() needed
    print(seq_names)

    # fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    print("sequence number =", len(seq_names))
    for seq_name in seq_names:
        seq_dir = join(data_dir, seq_name)
        save_dir = join(seq_dir, "SMPLX_fitting")

        # if exists, skip
        # if isdir(save_dir):
        #     continue

        if not isdir(seq_dir):
            continue

        try:
            # if seq_name != "017":
            #     continue

            # videoWriter = cv2.VideoWriter(join(seq_dir, "vis_smplx_{}.mp4".format(clip_name)), fourcc, 10, (640, 360))

            print("processing {} ...".format(seq_dir))

            torch.cuda.empty_cache()

            if not isfile(join(seq_dir, "VTS_data.npz")):
                print("#################### [error] no VTS_data.npz !!! #####################")
                continue

            # if not isfile(join(seq_dir, "aligned_frame_ids.txt")):
            if False:  # TODO: this is debugging
                print("aligning frames {} ...".format(seq_dir))
                # Use a dedicated name here: reusing `cfg` would clobber the
                # optimization weights passed to optimize_pose_sequence below.
                align_cfg = {
                    "camera1": True,
                    "camera2": True,
                    "camera3": True,
                    "camera4": True,
                    "person1": True,
                    "person2": True,
                    "object": False,
                }
                VTS_add_time = time_align(seq_dir, align_cfg, threshould=40000000, VTS_add_time=None)

                obj_name, obj_model_path = get_obj_info(seq_dir, args.obj_dataset_dir)
                if obj_name is not None:
                    prepare_objpose(seq_dir, obj_name, threshould=40000000, VTS_add_time=VTS_add_time)

            paired_frames = txt_to_paried_frameids(join(seq_dir, "aligned_frame_ids.txt"))

            # read object data (currently disabled)
            # object_data = None
            # if isfile(join(seq_dir, "aligned_objposes.npy")):
            #     obj_name, obj_model_path = get_obj_info(seq_dir, args.obj_dataset_dir)
            #     assert isfile(obj_model_path)
            #     obj2world = np.load(join(seq_dir, "aligned_objposes.npy"))
            #     obj_mesh = trimesh.load_mesh(obj_model_path)
            #     if obj_mesh.vertices.shape[0] > 10000:
            #         obj_mesh, _ = simplify_mesh(obj_mesh, obj_mesh)
            #         print("downsampled object mesh: vertices shape =", obj_mesh.vertices.shape)
            #     object_data = {
            #         "mesh": obj_mesh,
            #         "obj2world": obj2world,
            #     }

            # None means the optimizer runs through the final frame.
            end_frame_idx = None

            # Fit each person separately. Entries x[4] / x[5] of each paired
            # frame tuple presumably hold the per-person frame ids — TODO
            # confirm against txt_to_paried_frameids.
            print("start optimizing {} person 1 ...".format(seq_dir))
            save_person_dir = join(save_dir, "person_1")
            selected_frames = [x[4] for x in paired_frames]
            smplx_params = optimize_pose_sequence(join(seq_dir, "VTS_data.npz"), person_id=1, betas=person1_beta, start_frame_idx=0, end_frame_idx=end_frame_idx, end_link_trans=end_link_trans, save_dir=save_person_dir, cfg=cfg, device=device, selected_frames=selected_frames)
            print("start optimizing {} person 2 ...".format(seq_dir))
            save_person_dir = join(save_dir, "person_2")
            selected_frames = [x[5] for x in paired_frames]
            smplx_params = optimize_pose_sequence(join(seq_dir, "VTS_data.npz"), person_id=2, betas=person2_beta, start_frame_idx=0, end_frame_idx=end_frame_idx, end_link_trans=end_link_trans, save_dir=save_person_dir, cfg=cfg, device=device, selected_frames=selected_frames)

            # render optimization result (currently disabled)
            # cfg = {
            #     "render_person1": True,
            #     "render_person2": True,
            #     "render_object": True,
            # }
            # if object_data is None:
            #     cfg["render_object"] = False
            # camera_name = "d455_1"
            # camera_intrin_path = join(dataset_dir, "rawdata", clip_name, "param", camera_name, "intrinsic.txt")
            # camera_pose_path = join(dataset_dir, "rawdata", clip_name, "param", camera_name, "camera2world.txt")
            # M = 50
            # frame_range = [0, len(paired_frames) - 1]  # visualize all frames
            # save_path = join(save_dir, "vis_optim.mp4")
            # if not object_data is None:
            #     object_data["obj2world"] = object_data["obj2world"][frame_range[0] : frame_range[1] + 1]

            # valid = False
            # try:
            #     load_multiperson_smplx_params(join(seq_dir, "SMPLX_fitting"), start_frame=frame_range[0], end_frame=frame_range[1], device=device)
            #     valid = True
            # except:
            #     print("###### incomplete data : {} ######".format(seq_dir))
            # if valid:
            #     render_SMPLX_sequence(seq_dir, camera_name, camera_intrin_path, camera_pose_path, frame_range, M, save_path, cfg, object_data=object_data, paired_frames=paired_frames, render_contact=True, visualize_head_orientation=True, video_writer=videoWriter, device=device)

            # videoWriter.release()
        except Exception:
            # Best-effort batch processing: log the full traceback (not just
            # str(e)) so failures are debuggable, then move to the next
            # sequence instead of aborting the whole clip.
            traceback.print_exc()
            continue