
import sys
sys.path.append("/home/liuyun/HHO-dataset/data_processing/utils")
sys.path.append("/home/liuyun/HHO-dataset/data_processing/")
import numpy as np
from utils.HOI_retarget_experiment_v import parse_args
from utils.HOI_retarget_new import compute_contact_info, HOI_retargeting_given_target_contact
from utils.VTS_object import get_obj_info
from utils.retargeting_visualization import HOI_visualization
from utils.load_smplx_params import load_multiperson_smplx_params
from optimization.bvh2smplx import create_SMPLX_model
import math
import os
import trimesh
import torch
from typing import Literal
import random

def transform(params):
    """Build a 4x4 homogeneous transform from a 6-DoF parameter tuple.

    Args:
        params: (roll, pitch, yaw, x, y, z) — rotation angles in degrees,
            composed in intrinsic Z-Y-X order (R = Rz(yaw) @ Ry(pitch) @
            Rx(roll)), followed by the translation components.

    Returns:
        np.ndarray: a (4, 4) homogeneous transformation matrix.
    """
    roll_deg, pitch_deg, yaw_deg, tx, ty, tz = params

    # Precompute the sines/cosines of each angle once.
    sr, cr = np.sin(np.radians(roll_deg)), np.cos(np.radians(roll_deg))
    sp, cp = np.sin(np.radians(pitch_deg)), np.cos(np.radians(pitch_deg))
    sy, cy = np.sin(np.radians(yaw_deg)), np.cos(np.radians(yaw_deg))

    # Expanded form of Rz(yaw) @ Ry(pitch) @ Rx(roll).
    rotation = np.array([
        [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr],
        [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr],
        [-sp, cp * sr, cp * cr],
    ])

    mat = np.eye(4)
    mat[:3, :3] = rotation
    mat[:3, 3] = np.array([tx, ty, tz])
    return mat

# noise = [(0, 0, 0, 0, 0, 0),
#          (20, 0, 0, 0, 0, 0),
#          (40, 0, 0, 0, 0, 0),
#          (60, 0, 0, 0, 0, 0),
#          (0, 20, 0, 0, 0, 0),
#          (0, 40, 0, 0, 0, 0),
#          (0, 60, 0, 0, 0, 0),
#          (0, 0, 20, 0, 0, 0),
#          (0, 0, 40, 0, 0, 0),
#          (0, 0, 60, 0, 0, 0),
#          (0, 0, 0, 0.5, 0, 0),
#          (0, 0, 0, 0.3, 0, 0),
#          (0, 0, 0, 0, 0.5, 0),
#          (0, 0, 0, 0, 0.3, 0),
#          (0, 0, 0, 0, 0, 0.5),
#          (0, 0, 0, 0, 0, 0.3)
#          ]


def gen(seed=None):
    """Generate a randomized list of 6-DoF noise configurations.

    Each configuration is a 6-tuple ``(roll, pitch, yaw, x, y, z)``: the
    first three entries are rotation offsets in degrees, the last three are
    translation offsets (units follow the object poses — presumably meters,
    TODO confirm).

    The pool is built from single-axis rotations (20/40/60 deg), single-axis
    translations (0.2/0.5) and 6 random two-axis combinations; 9 of those
    are sampled, the zero-noise baseline is always appended, and 3 random
    three-axis combinations are added. Duplicates are then removed, so the
    result has at most 13 entries and arbitrary order.

    Args:
        seed: optional int. When given, a private RNG seeded with it is
            used so the output is reproducible. Defaults to the module
            ``random`` state (the previous behavior).

    Returns:
        list[tuple]: the deduplicated noise configurations.
    """
    rng = random.Random(seed) if seed is not None else random
    rot_mags = [20, 40, 60]
    trans_mags = [0.2, 0.5]

    def _single_axis(mags, axes):
        # One configuration per (magnitude, axis) pair; all other axes zero.
        out = []
        for mag in mags:
            for axis in axes:
                cfg = [0] * 6
                cfg[axis] = mag
                out.append(tuple(cfg))
        return out

    def _random_combo(num_axes):
        # Random magnitudes on `num_axes` distinct axes; rotation axes draw
        # from rot_mags, translation axes from trans_mags.
        cfg = [0] * 6
        for axis in rng.sample(range(6), num_axes):
            cfg[axis] = rng.choice(rot_mags if axis < 3 else trans_mags)
        return tuple(cfg)

    noise = _single_axis(rot_mags, range(0, 3))
    noise += _single_axis(trans_mags, range(3, 6))
    noise += [_random_combo(2) for _ in range(6)]
    noise = rng.sample(noise, 9)
    noise.append((0, 0, 0, 0, 0, 0))  # always keep the clean baseline
    noise += [_random_combo(3) for _ in range(3)]
    noise = list(set(noise))  # dedup; order becomes arbitrary
    print(noise)
    return noise


def HOI_retargeting(data_dir, save_dir, origin_mesh, obj_name, obj_poses, start_frame, end_frame, cfg_, device, obj_pose_file, obj_data_path, interval=1, obj_dataset_dir="/data3/datasets/HHO_object_dataset_final/"):
    """Retarget a two-person HOI sequence under several random 6-DoF object-pose noises.

    For each generated noise transform this computes perturbed object poses,
    runs contact-guided SMPL-X retargeting for both people, saves the new
    object/human pose arrays under a per-noise subdirectory of ``save_dir``,
    and renders a visualization video.

    Args:
        data_dir: sequence directory containing the "SMPLX_fitting" folder.
        save_dir: output root; one subdirectory is created per noise config.
        origin_mesh: trimesh mesh of the object (vertices in object frame).
        obj_name: object name (unused here; kept for interface stability).
        obj_poses: (N, 4, 4) object pose array covering the full sequence.
        start_frame, end_frame: frame range to process.
        cfg_: retargeting config dict (e.g. which persons to retarget).
        device: torch device string.
        obj_pose_file: path of the pose file (unused here; kept for interface).
        obj_data_path: path of the object mesh file (forwarded to visualization).
        interval: frame subsampling step.
        obj_dataset_dir: root of the object dataset, forwarded to
            HOI_visualization. Previously this was read from a module-level
            global defined only under ``__main__``, which raised NameError
            when the function was imported; the default preserves that value.
    """
    os.makedirs(save_dir, exist_ok=True)
    use_pca, num_pca_params = True, 12

    multiperson_SMPLX_params = load_multiperson_smplx_params(os.path.join(data_dir, "SMPLX_fitting"), start_frame=start_frame, end_frame=end_frame, device=device)

    vert, face = origin_mesh.vertices, origin_mesh.faces

    origin_pose = obj_poses[range(start_frame, end_frame, interval)]  # (N, 4, 4)
    # Transform object vertices into world frame for every frame at once.
    origin_vert_seq = (origin_pose[:, :3, :3] @ vert.T).transpose(0, 2, 1) + np.expand_dims(origin_pose[:, :3, 3], axis=1)
    origin_vert_seq = torch.from_numpy(origin_vert_seq).to(device)
    print(origin_vert_seq.shape)
    # From here on, end_frame is the number of subsampled frames.
    end_frame = origin_pose.shape[0]

    origin_contact_info = {
        "person1": {"contact": [], "dist": [], "closest_point": []},
        "person2": {"contact": [], "dist": [], "closest_point": []},
    }
    contact_threshold = 0.05  # meters; contact distance cutoff
    smplx_model = create_SMPLX_model(use_pca=use_pca, num_pca_comps=num_pca_params, batch_size=1, device=device)
    # Per-frame contact info of each person against the ORIGINAL object pose;
    # this is the contact target the retargeting tries to preserve.
    for idx in range(0, origin_pose.shape[0]):
        # person1 to original obj
        contact, dist, closest_point = compute_contact_info(multiperson_SMPLX_params["person1"], smplx_model, idx, origin_vert_seq[idx], threshould=contact_threshold, device=device)
        origin_contact_info["person1"]["contact"].append(contact.detach().cpu().numpy())
        origin_contact_info["person1"]["dist"].append(dist.detach().cpu().numpy())
        origin_contact_info["person1"]["closest_point"].append(closest_point.detach().cpu().numpy())

        # person2 to original obj
        contact, dist, closest_point = compute_contact_info(multiperson_SMPLX_params["person2"], smplx_model, idx, origin_vert_seq[idx], threshould=contact_threshold, device=device)
        origin_contact_info["person2"]["contact"].append(contact.detach().cpu().numpy())
        origin_contact_info["person2"]["dist"].append(dist.detach().cpu().numpy())
        origin_contact_info["person2"]["closest_point"].append(closest_point.detach().cpu().numpy())

    # Stack per-frame lists into per-person tensors on the target device.
    for person in origin_contact_info:
        origin_contact_info[person]["contact"] = torch.tensor(origin_contact_info[person]["contact"], dtype=torch.bool).to(device)
        origin_contact_info[person]["dist"] = torch.tensor(origin_contact_info[person]["dist"], dtype=torch.float32).to(device)
        origin_contact_info[person]["closest_point"] = torch.tensor(origin_contact_info[person]["closest_point"], dtype=torch.int64).to(device)
    print("finish preparing contact areas !!!")
    print(origin_contact_info["person1"]["contact"].shape, origin_contact_info["person1"]["dist"].shape, origin_contact_info["person1"]["closest_point"].shape)

    noise = gen()
    for trans in noise:
        noise_dir_name = "noise_" + '_'.join(str(x) for x in trans)
        noise_dir = os.path.join(save_dir, noise_dir_name)
        print("out", noise_dir)
        if os.path.isdir(noise_dir):
            # Already processed (or in progress) — skip this noise config.
            continue
        os.makedirs(noise_dir, exist_ok=True)
        trans_mat = transform(trans)
        # Compose the noise onto every original pose: rotation is right-
        # multiplied (object-frame perturbation), translation is added.
        new_poses = np.eye(4).reshape(1, 4, 4).repeat(end_frame, axis=0)
        print(new_poses.shape, trans_mat.shape, origin_pose.shape)
        new_poses[:, :3, :3] = origin_pose[:, :3, :3] @ trans_mat[:3, :3]
        new_poses[:, :3, 3] = origin_pose[:, :3, 3] + trans_mat[:3, 3]
        new_pose_file = os.path.join(noise_dir, "obj_poses_" + noise_dir_name + ".npy")
        np.save(new_pose_file, new_poses)

        new_vert_seq = (new_poses[:, :3, :3] @ vert.T).transpose(0, 2, 1) + np.expand_dims(new_poses[:, :3, 3], axis=1)
        new_vert_seq = torch.from_numpy(new_vert_seq).to(device)
        result_smplx_params = HOI_retargeting_given_target_contact(multiperson_SMPLX_params, new_vert_seq, None, origin_contact_info, start_frame, end_frame, use_pca, num_pca_params, use_new_obj=False, cfg=cfg_, epoch=1200, device=device)
        new_human_pose_file = os.path.join(noise_dir, "human_poses_" + noise_dir_name + ".npy")
        np.save(new_human_pose_file, result_smplx_params)
        HOI_visualization(new_human_pose_file, new_pose_file, obj_data_path, obj_dataset_dir, data_dir, noise_dir, start_frame, end_frame, save_filename="_vis.mp4", device=device)


if __name__ == "__main__":
    obj_dataset_dir = "/data3/datasets/HHO_object_dataset_final/"
    hho_dataset_dir = "/share/datasets/hhodataset/VTS/"
    cfg = {
        "retarget_person1": True,
        "retarget_person2": True,
    }

    args = parse_args()
    dataset_name = args.dataset
    dataset_root = os.path.join(hho_dataset_dir, dataset_name)
    if not os.path.isdir(dataset_root):
        # Bail out cleanly; previously execution fell through to os.listdir
        # and crashed with an uncaught FileNotFoundError.
        print("skipping...", dataset_name)
        sys.exit(0)
    for seq_name in os.listdir(dataset_root):
        try:
            if not os.path.isfile(os.path.join(dataset_root, seq_name, 'aligned_objposes.npy')):
                print("skipping...")
                continue

            data_dir = os.path.join(dataset_root, seq_name)

            obj_name, obj_data_path = get_obj_info(data_dir, obj_dataset_dir)
            print(obj_name, obj_data_path)

            obj_name = str(obj_name).lower()

            device = args.device
            start_frame = 0
            obj_pose_file = os.path.join(data_dir, 'aligned_objposes.npy')
            obj_poses = np.load(obj_pose_file, allow_pickle=True)
            end_frame = len(obj_poses)
            print(data_dir, "[", start_frame, end_frame, ")")

            HOI_retargeting_save_dir = os.path.join(data_dir, "noise_retargeting")

            origin_mesh = trimesh.load_mesh(obj_data_path)

            # Retarget this sequence under randomized object-pose noise;
            # all outputs land under the per-sequence noise_retargeting dir.
            HOI_retargeting(data_dir, HOI_retargeting_save_dir, origin_mesh, obj_name, obj_poses, start_frame, end_frame, cfg, device, obj_pose_file, obj_data_path)
        except Exception as e:
            # Batch boundary: log the failing sequence and keep going.
            print(data_dir, str(e))
            continue
