import os
import sys
import pdb

sys.path.append(os.getcwd())

import numpy as np
import glob
import pickle as pk
import joblib
import torch
import argparse

from tqdm import tqdm
from uhc.utils.transform_utils import (
    convert_aa_to_orth6d,
    convert_orth_6d_to_aa,
    vertizalize_smpl_root,
    rotation_matrix_to_angle_axis,
    rot6d_to_rotmat,
)
from scipy.spatial.transform import Rotation as sRot
from uhc.smpllib.smpl_mujoco import smpl_to_qpose, SMPL_M_Viewer
from mujoco_py import load_model_from_path, MjSim
from uhc.utils.config_utils.copycat_config import Config
from uhc.envs.humanoid_im import HumanoidEnv
from uhc.utils.tools import get_expert
from uhc.data_loaders.dataset_amass_single import DatasetAMASSSingle
from uhc.smpllib.smpl_parser import SMPLH_Parser
from uhc.utils.flags import flags

np.random.seed(1)

# Left/right mirror permutation over the 24 SMPL joints: index i of the
# mirrored pose reads joint left_right_idx[i] of the source pose.
# Joints 0, 3, 6, 9, 12, 15 (spine/head chain) map to themselves.
left_right_idx = [
    0, 2, 1,
    3, 5, 4,
    6, 8, 7,
    9, 11, 10,
    12, 14, 13,
    15, 17, 16,
    19, 18,
    21, 20,
    23, 22,
]


def left_to_rigth_euler(pose_euler):
    """Mirror a batch of per-joint Euler poses across the sagittal plane.

    Args:
        pose_euler: np.ndarray of shape (batch, 24, 3) — per-joint Euler
            angles (callers use "ZXY" order; see flip_smpl).

    Returns:
        np.ndarray of the same shape with the first and third Euler
        components negated and left/right joints swapped via
        ``left_right_idx``. Unlike the original, the caller's array is
        no longer mutated in place.
    """
    mirrored = pose_euler.copy()  # fix: don't clobber the caller's array
    mirrored[:, :, 0] *= -1
    mirrored[:, :, 2] *= -1
    return mirrored[:, left_right_idx, :]


def flip_smpl(pose, trans=None):
    """Mirror a batch of SMPL axis-angle poses left<->right.

    Args:
        pose: np.ndarray of shape (batch, 72) — 24 joints x 3 axis-angle
            components per frame.
        trans: optional root translations. Currently unused (the
            translation-flip branch is commented out).

    Returns:
        np.ndarray of shape (batch, 72) containing the mirrored poses.
    """
    curr_spose = sRot.from_rotvec(pose.reshape(-1, 3))
    curr_spose_euler = curr_spose.as_euler("ZXY", degrees=False).reshape(
        pose.shape[0], 24, 3)
    curr_spose_euler = left_to_rigth_euler(curr_spose_euler)
    curr_spose_rot = sRot.from_euler("ZXY",
                                     curr_spose_euler.reshape(-1, 3),
                                     degrees=False)
    curr_spose_aa = curr_spose_rot.as_rotvec().reshape(pose.shape[0], 24, 3)
    # fix: `trans != None` does an elementwise comparison on ndarrays
    # (truth value is ambiguous); identity check is the correct test.
    if trans is not None:
        pass
        # target_root_mat = curr_spose.as_matrix().reshape(pose.shape[0], 24, 3, 3)[:, 0]
        # root_mat = curr_spose_rot.as_matrix().reshape(pose.shape[0], 24, 3, 3)[:, 0]
        # apply_mat = np.matmul(target_root_mat[0], np.linalg.inv(root_mat[0]))

    return curr_spose_aa.reshape(-1, 72)


def sample_random_hemisphere_root():
    """Draw a random root orientation over the lower hemisphere.

    Composes a uniform yaw in [0, 2*pi) with a pitch offset uniform in
    [pi, pi + pi/3) and returns the combined rotation as a single
    axis-angle vector.
    """
    # NOTE: the two np.random draws happen in this exact order so the
    # global RNG stream stays reproducible.
    yaw = np.random.random() * np.pi * 2
    tilt = np.random.random() * np.pi / 3 + np.pi
    tilt_rot = sRot.from_rotvec([tilt, 0, 0])
    yaw_rot = sRot.from_rotvec([0, yaw, 0])
    return (tilt_rot * yaw_rot).as_rotvec()


def sample_seq_length(seq, tran, seq_length=150):
    """Cut (seq, tran) into randomly jittered windows of ``seq_length``.

    Args:
        seq, tran: array-likes sliced along axis 0 (same length).
        seq_length: window length in frames; -1 returns the whole
            sequence as a single window with no start points.

    Returns:
        (seqs, trans, start_points) — lists of slices and the window
        start indices (empty when seq_length == -1).
    """
    if seq_length == -1:
        return [seq], [tran], []

    total = seq.shape[0]
    n_windows = total // seq_length

    # RNG draws below mirror the original call order exactly:
    # one end-anchored window, interior windows, then a final window.
    jitter = np.random.randint(0, 10)
    start_points = [max(0, total - (seq_length + jitter))]

    for w in range(1, n_windows - 1):
        start_points.append(w * seq_length + np.random.randint(-10, 10))

    if n_windows >= 2:
        start_points.append(total - seq_length - np.random.randint(0, 10))

    seqs = [seq[s:(s + seq_length)] for s in start_points]
    trans = [tran[s:(s + seq_length)] for s in start_points]
    return seqs, trans, start_points


def get_random_shape(batch_size):
    """Sample one random SMPL shape (beta) vector, tiled over the batch.

    All ten betas are drawn uniformly in [0, 1); the first three are
    then overwritten with a wider N(0, 1.5) draw shared by every row.

    Returns:
        torch.Tensor of shape (batch_size, 10).
    """
    # torch.rand is called before np.random.normal to keep both global
    # RNG streams in the same order as before.
    betas = torch.rand(1, 10).repeat(batch_size, 1)
    wide_component = torch.tensor(np.random.normal(scale=1.5, size=(3)))
    betas[:, :3] = wide_component
    return betas


def fix_height(expert, expert_meta, env):
    """Shift an expert trajectory vertically so the feet start on the ground.

    Args:
        expert: dict with "wbpos" (frames x 72 world body positions) and
            "qpos" (mujoco qpos; column 2 is root height) — modified in
            place via qpos.
        expert_meta: metadata dict carrying "seq_name".
        env: environment handed through to get_expert for recomputation.

    Returns:
        The recomputed expert dict, the original expert for crawling
        sequences, or None when the shifted feet penetrate the ground.
    """
    body_pos = expert["wbpos"].reshape(-1, 24, 3)
    feet_z0 = min(body_pos[0, 4, 2], body_pos[0, 8, 2])
    root_z0 = body_pos[0, 0, 2]

    # Crawling clips start with a low root but feet near the floor;
    # leave those untouched.
    if root_z0 < 0.3 and feet_z0 > -0.1:
        print(f"Crawling: {expert_meta['seq_name']}")
        return expert

    offset = feet_z0 - 0.015  # 0.015: hyper-parameter to tune
    qpos = expert["qpos"]
    qpos[:, 2] -= offset
    adjusted = get_expert(qpos, expert_meta, env)

    new_pos = adjusted["wbpos"].reshape(-1, 24, 3)
    ground_pene = min(np.min(new_pos[:, 4, 2]), np.min(new_pos[:, 8, 2]))
    if ground_pene < -0.15:
        print(
            f"{expert_meta['seq_name']} negative sequence invalid for copycat: {ground_pene}"
        )
        return None
    return adjusted


def count_consec(lst):
    """Return the lengths of runs of consecutive (+1 step) items in ``lst``.

    Example: [1, 2, 3, 5, 6] -> [3, 2]. An empty input yields [1]
    (one vacuous run), matching the original behavior.
    """
    runs = [1]
    for prev, nxt in zip(lst, lst[1:]):
        if nxt - prev == 1:
            runs[-1] += 1
        else:
            runs.append(1)
    return runs


def fix_height_smpl(pose_aa, th_trans, th_betas, gender, seq_name):
    """Ground-align an SMPL sequence by shifting its root translation.

    Picks the gender-matched module-level SMPL-H parser, drops the clip
    so the first frame's lowest vertex touches z = 0, and rejects clips
    that start too high or stay airborne for too long.

    Returns:
        The adjusted ``th_trans`` (modified in place), or None when the
        sequence is unusable for copycat training.
    """
    # `gender` may arrive as a 0-d numpy array and/or raw bytes.
    gender = gender.item() if isinstance(gender, np.ndarray) else gender
    if isinstance(gender, bytes):
        gender = gender.decode("utf-8")

    # Parsers are created in __main__; one per supported gender.
    if gender == "neutral":
        parser = smpl_parser_n
    elif gender == "male":
        parser = smpl_parser_m
    elif gender == "female":
        parser = smpl_parser_f
    else:
        print(gender)
        raise Exception("Gender Not Supported!!")

    batch_size = pose_aa.shape[0]

    # Lowest vertex of the first frame defines the ground offset.
    verts, jts = parser.get_joints_verts(pose_aa[0:1],
                                         th_betas.repeat((1, 1)),
                                         th_trans=th_trans[0:1])
    ground_offset = torch.min(verts[:, :, 2])

    if ground_offset > 0.1:
        print(f"Starting too high: {seq_name}")
        return None

    th_trans[:, 2] -= ground_offset
    verts, jts = parser.get_joints_verts(pose_aa,
                                         th_betas.repeat((batch_size, 1)),
                                         th_trans=th_trans)

    # Frames where both joints 10/11 (presumably the feet — TODO confirm
    # against the SMPL joint layout) sit above 0.2 m; a long consecutive
    # stretch means the clip is airborne.
    airborne = count_consec(
        torch.nonzero(torch.sum(jts[:, [10, 11], 2] > 0.2, axis=1) > 1))
    if np.max(airborne) > 30:
        print(
            f"{seq_name} too high sequence invalid for copycat: {np.max(airborne)}"
        )
        return None
    return th_trans


def process_qpos_list(qpos_list):
    """Convert raw AMASS sequences into the copycat training format.

    Args:
        qpos_list: iterable of (seq_key, data_dict) pairs; each dict is
            expected to carry "betas", "gender", "mocap_framerate",
            "poses" (frames x 72 axis-angle) and "trans" — assumed from
            the amass_db dump, TODO confirm.

    Returns:
        dict mapping "0-<seq_key>" to a dict of numpy arrays
        ("pose_aa", "pose_6d", "trans", "beta") plus "seq_name" and
        "gender". Sequences shorter than 10 frames are skipped.

    Relies on module-level ``target_fr`` (set in __main__) for the
    frame-rate subsampling and on ``flags.debug`` for early exit.
    """
    amass_res = {}
    removed_k = []  # keys rejected during filtering (printed at the end)
    pbar = qpos_list
    for (k, v) in pbar:
        k = "0-" + k
        seq_name = k
        betas = v["betas"]
        gender = v["gender"]
        amass_fr = v["mocap_framerate"]
        # Subsample to roughly target_fr by taking every `skip`-th frame.
        skip = int(amass_fr / target_fr)
        amass_pose = v["poses"][::skip]
        amass_trans = v["trans"][::skip]

        bound = amass_pose.shape[0]

        seq_length = amass_pose.shape[0]
        if seq_length < 10:  # too short to be useful
            continue
        with torch.no_grad():
            pose_aa = torch.tensor(
                amass_pose)[:bound]  # After sampling the bound
            amass_trans = torch.tensor(
                amass_trans[:bound])  # After sampling the bound
            betas = torch.from_numpy(betas)
            batch_size = pose_aa.shape[0]

            # fix: pose_aa is already a tensor — the original re-wrapped
            # it in torch.tensor(), which copies and emits a UserWarning.
            pose_seq_6d = convert_aa_to_orth6d(pose_aa).reshape(
                batch_size, -1, 6)

            amass_res[seq_name] = {
                "pose_aa": pose_aa.numpy(),
                "pose_6d": pose_seq_6d.numpy(),
                "trans": amass_trans.numpy(),
                "beta": betas.numpy(),
                "seq_name": seq_name,
                "gender": gender,
            }

        if flags.debug and len(amass_res) > 10:
            break
    print(removed_k)
    return amass_res


# Dataset-name groupings for the AMASS train / test / validation splits.
amass_splits = {
    'vald': ['HumanEva', 'MPI_HDM05', 'SFU', 'MPI_mosh'],
    'test': ['Transitions_mocap', 'SSM_synced'],
    'train': [
        'CMU', 'MPI_Limits', 'TotalCapture', 'Eyes_Japan_Dataset', 'KIT',
        'BML', 'EKUT', 'TCD_handMocap', "BMLhandball", "DanceDB"
    ]  #ACCAD
}

# Inverted view: dataset name -> split label ("train" / "test" / "vald").
amass_split_dict = {
    dataset: split
    for split, datasets in amass_splits.items()
    for dataset in datasets
}

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--debug", action="store_true", default=False)
    args = parser.parse_args()

    np.random.seed(0)
    flags.debug = args.debug
    amass_base = "/hdd/zen/data/ActBound/AMASS/"
    take_num = "copycat_take5_5"
    amass_seq_data = {}
    seq_length = -1
    cfg = Config(cfg_id="copycat_30", create_dirs=False)

    data_loader = DatasetAMASSSingle(cfg.data_specs, data_mode="test")

    target_fr = 30  # target framerate consumed by process_qpos_list
    video_annot = {}
    counter = 0
    seq_counter = 0
    amass_db = joblib.load("/hdd/zen/data/ActBound/AMASS/amass_db_smplx.pt")
    amass_occlusion = joblib.load(
        "/hdd/zen/data/ActBound/AMASS/amass_copycat_occlusion.pkl")

    # fix: was an f-string with no placeholders.
    model_file = "assets/mujoco_models/humanoid_smpl_neutral_mesh.xml"
    humanoid_model = load_model_from_path(model_file)

    qpos_list = list(amass_db.items())
    # Re-seed so the shuffle order is reproducible regardless of any
    # randomness consumed during the setup above.
    np.random.seed(0)
    np.random.shuffle(qpos_list)

    # Gender-specific SMPL-H parsers read as module globals by
    # fix_height_smpl.
    smpl_parser_n = SMPLH_Parser(model_path="data/smpl",
                                 gender="neutral",
                                 use_pca=False,
                                 create_transl=False)
    smpl_parser_m = SMPLH_Parser(model_path="data/smpl",
                                 gender="male",
                                 use_pca=False,
                                 create_transl=False)
    smpl_parser_f = SMPLH_Parser(model_path="data/smpl",
                                 gender="female",
                                 use_pca=False,
                                 create_transl=False)

    amass_seq_data = process_qpos_list(qpos_list)

    suffix = "_test" if flags.debug else ""
    amass_output_file_name = (
        f"/hdd/zen/data/ActBound/AMASS/amass_{take_num}{suffix}.pkl")
    print(amass_output_file_name, len(amass_seq_data))
    # fix: joblib.dump accepts a path directly; the original passed an
    # open file handle that was never closed (leaked descriptor).
    joblib.dump(amass_seq_data, amass_output_file_name)