import os
from os.path import join, isfile
import sys
sys.path.append("..")
import numpy as np
import pickle
import torch
from torch import nn
import pytorch3d
import pytorch3d.io as IO
import trimesh
from smplx import smplx
from smplx.transfer_model.config import parse_args
import argparse
# import cv2
# import imageio
# from utils.txt2intrinsic import txt2intrinsic
# from utils.pyt3d_wrapper import Pyt3DWrapper
# from utils.avi2depth import avi2depth
# from utils.time_align import time_align
# from utils.process_timestamps import txt_to_paried_frameids, paired_frameids_to_txt
# from utils.contact import compute_contact
# from utils.VTS_object import get_obj_info, get_obj_name_correspondance
from utils.load_smplx_params import load_multiperson_smplx_params
# from utils.object_retargeting import obj_retargeting
from utils.process_transformation import np_mat2axangle, torch_mat2axangle
from smplx.transfer_model.transfer import transfer_to_smpl

from torch.utils.data import DataLoader

# pwd = os.path.dirname(os.path.realpath(__file__))
# sys.path.append(os.path.join(pwd, "..", "VIBE"))
# sys.path.append(os.path.join(pwd, "..", "smplx"))
# from lib.core.config import *
# from lib.utils.utils import prepare_output_dir
# from lib.models import  MotionDiscriminator




def get_smplx_sequence(data_dir, output_file, start_frame, end_frame):
    """Export per-frame SMPL-X meshes for both persons of one sequence as PLY files.

    Args:
        data_dir: sequence sub-path under the HHO dataset root (e.g. "20230805_1/000/").
        output_file: output directory; meshes are written to <output_file>/p1 and /p2.
        start_frame, end_frame: frame range [start_frame, end_frame), loaded in
            chunks of 50 frames to bound memory.

    Relies on the module-level ``device`` global being set before the call.
    """
    num_pca_comps = 12  # hand PCA dimension
    smplx_model = smplx.create(
        "/share/human_model/models",
        model_type="smplx",
        gender="neutral",
        use_face_contour=False,
        num_betas=10,
        num_expression_coeffs=10,
        ext="npz",
        use_pca=True,
        num_pca_comps=num_pca_comps,
        flat_hand_mean=True,
    )
    smplx_model.to(device)
    data = join("/share/datasets/HHO_dataset/data/", data_dir)
    # Create output dirs once, not once per frame.
    os.makedirs(join(output_file, "p1"), exist_ok=True)
    os.makedirs(join(output_file, "p2"), exist_ok=True)
    param_keys = ("betas", "expression", "body_pose", "transl",
                  "global_orient", "left_hand_pose", "right_hand_pose")
    for j in range(start_frame, end_frame, 50):  # chunked loading of the fitting params
        multiperson_SMPLX_params = load_multiperson_smplx_params(
            join(data, "SMPLX_fitting"), start_frame=j, end_frame=j + 50, device=device)
        end_num = min(50, end_frame - j)  # last chunk may be shorter
        for i in range(0, end_num):
            for person, sub in (("person1", "p1"), ("person2", "p2")):
                params = multiperson_SMPLX_params[person]
                # Single-frame (batch of 1) forward pass through the SMPL-X layer.
                frame_kwargs = {k: params[k][i].unsqueeze(0) for k in param_keys}
                out = smplx_model(return_verts=True, **frame_kwargs)
                # NOTE(review): faces are read off the *output* object; stock smplx exposes
                # faces on the model (smplx_model.faces, a numpy array) — confirm this
                # fork's API actually attaches a faces tensor to the output.
                mesh = trimesh.Trimesh(
                    vertices=out.vertices.detach().cpu().numpy()[0],
                    faces=out.faces.detach().cpu().numpy(),
                )
                ply_bytes = trimesh.exchange.ply.export_ply(mesh)
                # BUG FIX: the frame id is j + i — j already steps in units of frames.
                # The old name f"{j * 50 + i}.ply" wrote files under wrong indices.
                with open(join(output_file, sub, f"{j + i}.ply"), "wb+") as f:
                    f.write(ply_bytes)

def get_input_for_discriminator(output_file):
    """Assemble VIBE-style "theta" vectors from per-frame SMPL fitting pickles.

    Scans *output_file* for per-frame ``.pkl`` results (skipping the aggregated
    ``theta.pkl``) and builds one 85-D row per frame:
    [cam(3), pose(72 axis-angle, global orient appended last), betas(10)].

    Returns:
        np.ndarray of shape (1, seq_len, 85) — batch of one motion sequence.
    """
    def _frame_key(fname):
        # Order frames numerically ("9.pkl" < "10.pkl"); non-numeric names sort last.
        stem = os.path.splitext(fname)[0]
        return (0, int(stem), stem) if stem.isdigit() else (1, 0, stem)

    theta = []
    # BUG FIX: os.listdir() returns files in arbitrary order, but the discriminator
    # consumes a *temporal* sequence — frames must be visited in frame-id order.
    for file in sorted(os.listdir(output_file), key=_frame_key):
        file_path = join(output_file, file)
        if not os.path.isfile(file_path) or not file_path.endswith(".pkl") or file_path.endswith("theta.pkl"):
            continue  # skip directories, non-pickles, and our own aggregated output
        with open(file_path, 'rb') as f:
            result = pickle.load(f, encoding='bytes')
        body_pose = result['body_pose'].detach().cpu().numpy()          # (1, 23, 3, 3) rot-mats
        global_orient = result['global_orient'].detach().cpu().numpy()  # (1, 1, 3, 3)
        betas = result['betas'].detach().cpu().numpy()                  # (1, 10)
        body_pose = np.concatenate([body_pose, global_orient], axis=1)  # (1, 24, 3, 3)
        body_pose = np_mat2axangle(body_pose[0]).reshape(1, -1)         # (1, 72) axis-angle
        cam = np.array([1., 0., 0.])[None, ...]                         # dummy weak-persp. camera
        theta.append(np.concatenate([cam, body_pose, betas], axis=1).reshape(-1))
    theta = np.array(theta)

    return theta.reshape(1, -1, 85)  # (batch=1, seq_len, 85)
    

def motion_discriminator(theta):
    """Score a SMPL motion sequence with VIBE's pretrained motion discriminator.

    Args:
        theta: np.ndarray of shape (batch, seq_len, 85) — VIBE theta vectors.

    Returns:
        Discriminator output tensor (one realism score per sequence).

    NOTE(review): ``update_cfg``, ``prepare_output_dir`` and ``MotionDiscriminator``
    come from the VIBE imports commented out at the top of this file; re-enable them
    before calling, otherwise this raises NameError. Reads the module-level ``device``.
    """
    pretrained_file = "/home/liuyun/codebases/VIBE-master/data/vibe_data/vibe_model_wo_3dpw.pth.tar"
    cfg_file = "/home/liuyun/codebases/VIBE-master/configs/config_test_motion_discriminator.yaml"
    cfg = update_cfg(cfg_file)
    cfg = prepare_output_dir(cfg, cfg_file)
    pool = cfg.TRAIN.MOT_DISCR.FEATURE_POOL
    attention = pool == 'attention'
    motion_discriminator = MotionDiscriminator(
        rnn_size=cfg.TRAIN.MOT_DISCR.HIDDEN_SIZE,
        input_size=69,  # SMPL body pose: 23 joints * 3 axis-angle components
        num_layers=cfg.TRAIN.MOT_DISCR.NUM_LAYERS,
        output_size=1,
        feature_pool=pool,
        attention_size=cfg.TRAIN.MOT_DISCR.ATT.SIZE if attention else None,
        attention_layers=cfg.TRAIN.MOT_DISCR.ATT.LAYERS if attention else None,
        attention_dropout=cfg.TRAIN.MOT_DISCR.ATT.DROPOUT if attention else None,
    ).to(device)
    # BUG FIX: map the checkpoint onto the target device; a checkpoint saved on a
    # different CUDA device (or loaded on CPU) would otherwise fail to deserialize.
    ckpt = torch.load(pretrained_file, map_location=device)
    motion_discriminator.load_state_dict(ckpt['disc_motion_state_dict'])
    HHO_smpl_motion_seq = torch.from_numpy(theta).to(torch.float32).to(device)  # (batch, seq_len, 85)
    # NOTE(review): this slice assumes the VIBE layout [cam, global_orient, body_pose,
    # betas]; get_input_for_discriminator appends global_orient *after* body_pose —
    # verify the two layouts actually agree before trusting the scores.
    HHO_smpl_motion_seq = HHO_smpl_motion_seq[:, :, 6:75]  # 69-D pose slice fed to the RNN
    return motion_discriminator(HHO_smpl_motion_seq)



if __name__ == '__main__':
    import traceback  # local import: only needed for error logging in this entry point

    # Module-level global: read by get_smplx_sequence() / motion_discriminator().
    device = torch.device("cuda:6")
    # Batch-process sequences 000..054 of capture session 20230805_1; a failure in one
    # sequence is logged and the loop moves on to the next.
    with open("./smplx2smpl_20230805_1.log", "a") as log:
        for i in range(55):
            try:
                print(device, file=log, flush=True)
                data_dir = "20230805_1/" + str(i).rjust(3, "0") + "/"
                print(data_dir, file=log, flush=True)
                output_file = "../smplx/smplx_output/" + data_dir
                output_file_p1 = output_file + "p1/"
                output_file_p2 = output_file + "p2/"
                start_frame = 150
                end_frame = 166

                # parse_args() reads the smplx2smpl transfer config from the CLI.
                exp_cfg = parse_args()
                exp_cfg.output_folder = output_file_p1
                exp_cfg.datasets.mesh_folder.data_folder = output_file_p1
                exp_cfg["use_cuda"] = True
                get_smplx_sequence(data_dir, output_file, start_frame, end_frame)

                transfer_to_smpl(exp_cfg)
                theta = get_input_for_discriminator(output_file_p1)
                print(theta.shape)
                # result = motion_discriminator(theta)
                # print(result)
                # Use a context manager so the dump file handle is closed deterministically
                # (pickle.dump(theta, open(...)) leaked the handle).
                with open(join(output_file_p1, "theta.pkl"), "wb") as f:
                    pickle.dump(theta, f)

                # Re-point the same transfer config at person 2 and repeat.
                exp_cfg.output_folder = output_file_p2
                exp_cfg.datasets.mesh_folder.data_folder = output_file_p2
                transfer_to_smpl(exp_cfg)
                theta = get_input_for_discriminator(output_file_p2)
                print(theta.shape)
                # result = motion_discriminator(theta)
                with open(join(output_file_p2, "theta.pkl"), "wb") as f:
                    pickle.dump(theta, f)
            except Exception:
                # Log the full traceback (str(e) alone loses the failure location)
                # and continue with the next sequence.
                print(traceback.format_exc(), file=log, flush=True)
            

