# 2025/1/25: (liuyun) TODO: for CORE4D-Real V3

import os
from os.path import join, isdir, isfile, dirname, abspath
import sys
sys.path.insert(0, dirname(abspath(__file__)))
import argparse
import numpy as np
import cv2
import torch
import trimesh
import time
# from optimization.bvh2smplx import optimize_pose_sequence
from optimization.bvh2smplx_vposer import optimize_pose_sequence
from utils.visualization import save_pcd, render_SMPLX_sequence
from utils.betas import get_betas_dict
from utils.VTS_object import get_obj_info
from utils.time_align import time_align, prepare_objpose
from utils.process_timestamps import txt_to_paried_frameids
from utils.get_person_name import get_person_name
from utils.load_smplx_params import load_multiperson_smplx_params
from utils.simplify_mesh import simplify_mesh
from optimization.bvh2smplx import optimize_shape


def get_args_and_cfg():
    """Parse command-line arguments and build the optimizer weight config.

    Returns:
        (args, cfg): the parsed argparse namespace, and a dict containing
        only the loss-weight entries consumed by the pose optimizer.
    """
    parser = argparse.ArgumentParser()

    # ---------------- data paths / task selection ----------------
    parser.add_argument('--obj_dataset_dir', type=str, default="/data2/datasets/HHO_object_dataset_final_simplified")
    parser.add_argument('--dataset_dir', type=str, default="/share/hhodataset/")
    # parser.add_argument('--handpose_folder_name', type=str, default="handpose_20250428")
    parser.add_argument('--save_folder_name', type=str, default="SMPLX_fitting_VPoser_20250428")
    parser.add_argument('--clip_name', type=str, default="20231008")
    # When set, only a single sequence is processed, e.g. "20231008/000".
    parser.add_argument('--seq_name', type=str, default=None)
    # "<GPU index of this process>/<total GPU count>". Each GPU is expected to
    # run exactly one process; the sequences are split evenly among them.
    parser.add_argument('--gpu_info', type=str, default="0/1")

    # ---------------- hyper-parameter tuning ----------------
    parser.add_argument('--lr', type=float, default=1e-2)
    parser.add_argument('--weight_joint3d_body', type=float, default=1.0)
    parser.add_argument('--weight_joint3d_hand', type=float, default=1.0)
    parser.add_argument('--weight_regularizer_body', type=float, default=1e-3)
    parser.add_argument('--weight_regularizer_hand', type=float, default=1e-4)
    parser.add_argument('--weight_smoothness', type=float, default=1e1)
    parser.add_argument('--weight_regularizer_to_init', type=float, default=1e-2)
    parser.add_argument('--save_suffix', type=str, default="")

    args = parser.parse_args()

    # Collect the loss-weight arguments into a plain dict for the optimizer.
    weight_keys = (
        "weight_joint3d_body",
        "weight_joint3d_hand",
        "weight_regularizer_body",
        "weight_regularizer_hand",
        "weight_smoothness",
        "weight_regularizer_to_init",
    )
    cfg = {key: getattr(args, key) for key in weight_keys}

    return args, cfg


if __name__ == "__main__":
    # `cfg` holds the optimization loss weights and is passed to
    # optimize_pose_sequence for every sequence below — it must NOT be
    # reassigned inside the loop (see BUGFIX notes further down).
    args, cfg = get_args_and_cfg()
    # test_smplx()

    # Body-scale reference (neck-to-ankle, upper arm, forearm, thigh, calf),
    # kept for ad-hoc shape fitting:
    # measurements: 132 32 27 58 37 / 179 146 71 25 27 60 43
    # human_scale = np.float32([135, 45, 30, 60, 43]) / 100  # type: ignore # new
    # betas = optimize_shape(human_scale, from_bvh=False)
    # print("betas:", betas)
    # exit(0)

    dataset_dir = args.dataset_dir
    if args.seq_name is None:
        clip_name = args.clip_name
    else:
        clip_name = args.seq_name.split("/")[0]
    data_dir = join(dataset_dir, "VTS", clip_name)
    person1_name, person2_name = get_person_name(join(dataset_dir, "VTS", clip_name, "person_name.txt"))
    print("person1, person2 names:", person1_name, person2_name)

    # person2 wears ego
    betas_dict = get_betas_dict()
    person1_beta = betas_dict[person1_name]  # type: ignore
    person2_beta = betas_dict[person2_name]  # type: ignore
    # print(person1_name, person1_beta)
    # print(person2_name, person2_beta)
    # exit(0)

    # Per-end-effector translation offsets fed to the optimizer;
    # values calibrated on session 20230323_debug2.
    end_link_trans = np.float32([
        [0, 8.5, 0],
        [4, -8, 20],
        [-4, -8, 17],
        [-4, -8, 20],
        [4, -8, 17],
        [-3, 0, 0],
        [-2.8, 0, 0],
        [-2.8, 0, 0],
        [-2.8, 0, 0],
        [-2.5, 0, 0],
        [3, 0, 0],
        [2.8, 0, 0],
        [2.8, 0, 0],
        [2.8, 0, 0],
        [2.5, 0, 0],
    ])  # type: ignore

    # get all sequence names
    if args.seq_name is None:
        seq_names = []
        for seq_name in os.listdir(data_dir):
            seq_dir = join(data_dir, seq_name)
            if (not isdir(seq_dir)) or (seq_name == "tst"):
                continue
            seq_names.append(seq_name)
        seq_names.sort()
    else:
        seq_names = [args.seq_name.split("/")[-1]]

    GPU_number = int(args.gpu_info.split("/")[1])
    current_GPU = int(args.gpu_info.split("/")[0])
    # device = "cuda:" + str(current_GPU)
    device = "cuda:0"
    # Keep only the share of sequences assigned to this process: the list is
    # split into GPU_number even chunks and this process takes chunk
    # `current_GPU`.
    K = int(np.ceil(len(seq_names) / GPU_number))
    seq_names = seq_names[K * current_GPU : min(K * (current_GPU + 1), len(seq_names))]
    print(seq_names)

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')

    print("sequence number =", len(seq_names))
    for seq_name in seq_names:

        seq_dir = join(data_dir, seq_name)
        # handpose_dir = join(seq_dir, args.handpose_folder_name)
        save_folder_name = args.save_folder_name
        save_dir = join(seq_dir, save_folder_name)

        # # if exists, skip
        # if isdir(save_dir):
        #     continue

        if not isdir(seq_dir):
            continue

        videoWriter = cv2.VideoWriter(join(seq_dir, "vis_smplx_{}_{}_{}.mp4".format(clip_name, seq_name, "VPoser" if (save_folder_name.find("_VPoser") > -1) else "NoPrior")), fourcc, 10, (1280, 720))

        print("processing {} ...".format(seq_dir))

        torch.cuda.empty_cache()

        if not isfile(join(seq_dir, "VTS_data.npz")):
            print("#################### [error] no VTS_data.npz !!! #####################")
            continue

        # if not isfile(join(seq_dir, "aligned_frame_ids.txt")):
        if False:  # TODO: this is debugging
            print("aligning frames {} ...".format(seq_dir))
            # BUGFIX: this dict used to be assigned to `cfg`, shadowing the
            # optimization weights from get_args_and_cfg(); use a dedicated
            # name instead.
            align_cfg = {
                "camera1": True,
                "camera2": True,
                "camera3": True,
                "camera4": True,
                "person1": True,
                "person2": True,
                "object": False,
            }
            VTS_add_time = time_align(seq_dir, align_cfg, threshould=40000000, VTS_add_time=None)

            obj_name, obj_model_path = get_obj_info(seq_dir, args.obj_dataset_dir)
            if not obj_name is None:
                prepare_objpose(seq_dir, obj_name, threshould=40000000, VTS_add_time=VTS_add_time)

        if not isfile(join(seq_dir, "aligned_frame_ids.txt")):
            print("#################### [error] no aligned_frame_ids.txt !!! #####################")
            continue
        paired_frames = txt_to_paried_frameids(join(seq_dir, "aligned_frame_ids.txt"))

        # read object data
        object_data = None
        if isfile(join(seq_dir, "aligned_objposes.npy")):
            obj_name, obj_model_path = get_obj_info(seq_dir, args.obj_dataset_dir)
            assert isfile(obj_model_path)
            obj2world = np.load(join(seq_dir, "aligned_objposes.npy"))
            print("load object model from:", obj_model_path)
            obj_mesh = trimesh.load_mesh(obj_model_path)
            print("object mesh:", obj_mesh)
            if obj_mesh.vertices.shape[0] > 10000:
                obj_mesh, _ = simplify_mesh(obj_mesh)
                print("downsampled object mesh: vertices shape =", obj_mesh.vertices.shape)
            object_data = {
                "mesh": obj_mesh,
                "obj2world": obj2world,
            }
        else:
            print("#################### [error] no aligned_objposes.npy !!! #####################")
            continue

        end_frame_idx = None

        # [main part] optimization
        print("start optimizing {} person 1 ...".format(seq_dir))
        start_time = time.time()
        # handpose_person_dir = join(handpose_dir, "person_1")
        save_person_dir = join(save_dir, "person_1")
        selected_frames = [x[4] for x in paired_frames]
        smplx_params = optimize_pose_sequence(join(seq_dir, "VTS_data.npz"), person_id=1, betas=person1_beta, start_frame_idx=0, end_frame_idx=end_frame_idx, end_link_trans=end_link_trans, save_dir=save_person_dir, cfg=cfg, device=device, selected_frames=selected_frames, handpose_dir=None)
        print("finish optimizing {} person 1, time cost = {}".format(seq_dir, time.time() - start_time))
        print("start optimizing {} person 2 ...".format(seq_dir))
        start_time = time.time()
        # handpose_person_dir = join(handpose_dir, "person_2")
        save_person_dir = join(save_dir, "person_2")
        selected_frames = [x[5] for x in paired_frames]
        smplx_params = optimize_pose_sequence(join(seq_dir, "VTS_data.npz"), person_id=2, betas=person2_beta, start_frame_idx=0, end_frame_idx=end_frame_idx, end_link_trans=end_link_trans, save_dir=save_person_dir, cfg=cfg, device=device, selected_frames=selected_frames, handpose_dir=None)
        print("finish optimizing {} person 2, time cost = {}".format(seq_dir, time.time() - start_time))

        # render optimization result
        # TODO: change to directly load the overall result "result.npz"
        print("start rendering results ...")
        start_time = time.time()
        # BUGFIX: the render flags used to be assigned to `cfg`, overwriting
        # the optimization loss weights — from the second sequence onward,
        # optimize_pose_sequence then received these render flags instead of
        # the weights. A separate name keeps `cfg` intact across iterations.
        render_cfg = {
            "render_person1": True,
            "render_person2": True,
            "render_object": True,
        }
        if object_data is None:
            render_cfg["render_object"] = False
        camera_name = "d455_1"
        camera_intrin_path = join(dataset_dir, "intrinsic/azure2/intrinsic.json")
        camera_pose_path = join(dataset_dir, "extrinsic/azure2/20231101/camera2world.txt")
        frame_range = [0, len(paired_frames)-1 if (end_frame_idx is None) else end_frame_idx-1]  # visualize all frames
        save_path = join(save_dir, "vis_optim.mp4")
        if not object_data is None:
            object_data["obj2world"] = object_data["obj2world"][frame_range[0] : frame_range[1] + 1]
        M = -1 if (save_folder_name.find("_VPoser") > -1) else 50
        render_SMPLX_sequence(seq_dir, camera_name, camera_intrin_path, camera_pose_path, frame_range, M=M, save_path=save_path, cfg=render_cfg, object_data=object_data, paired_frames=paired_frames, render_contact=False, visualize_head_orientation=False, video_writer=videoWriter, device=device, result_foldername=save_folder_name, stack_on_vision=False)
        videoWriter.release()
        print("finish rendering results, time cost = {}".format(time.time() - start_time))
