'''
Estimate object pose in camera coordinates.
Transfer grasp points by nearest-neighbor searching.
'''
import argparse, os, yaml, sys, pickle
import numpy as np
import torch
import torch.nn.functional as F
import time
import trimesh
sys.path.append('./')
sys.path.append('./dif')
from lib.fk.FK_layer import FK_layer
from dif.dif_net import DeformedImplicitField
from shape_encoder.model import ShapeEncoder
from dif.sdf_meshing import create_mesh

from pose_estimator.data import PartPointsDatset
from pose_estimator.model import DeformNet
from pose_estimator.utils import estimateSimilarityTransform


# Command-line options; parsed once at import time into the module-global
# `opt`, which the rest of the script (including helper functions) reads.
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='datasets')    # dataset root directory
parser.add_argument('--category', type=str, default='mug')          # object category to process
parser.add_argument('--mode', type=str, default='eval')             # dataset split ('eval'/'train', etc.)
parser.add_argument('--num_points', type=int, default=1024)         # points sampled per partial cloud
parser.add_argument('--shape_model_path', type=str, default='')     # '' -> per-category default path
parser.add_argument('--pose_model_path', type=str, default='')      # '' -> per-category default path
parser.add_argument('--resolution', type=int, default=64)           # marching-cubes / query-grid resolution
parser.add_argument("--batch_size", type=int, default=1)            # NOTE: downstream code assumes 1
parser.add_argument("--grasp_id", type=str, default = "")           # id of the labeled grasp to transfer
parser.add_argument("--vis", action="store_true", default=False)    # visualize instead of saving results
opt = parser.parse_args()


def transfer_grasp_points_to_Template_Field(category, dif_model, grasp_id=1):
    """Map the labeled grasp's hand points into DIF template space.

    Loads the hand pose labeled on the category's source instance, runs the
    forward-kinematics layer to get hand surface points, rescales them into
    object space, and maps them through the DIF model into the shared
    template field. Contact / non-contact indices (by template SDF value)
    are saved next to the grasp label as an .npz side file.

    Args:
        category: object category name (key into cfg/class_cfg.yml).
        dif_model: deformed-implicit-field model providing
            get_template_coords / get_template_field.
        grasp_id: identifier of the labeled grasp to load.

    Returns:
        (contact_pts_template, label): contact points in template space
        ([1, n_contact, 3] tensor) and the raw npz grasp label.
    """
    # The source instance is the one instance of this category that carries
    # hand-labeled grasps.
    with open('cfg/class_cfg.yml','r') as stream:
        class_cfg = yaml.safe_load(stream)
    src_inst = class_cfg[category]['src_inst_name']

    label_file_path = f"grasp_label/{category}/train/{src_inst}/0/label_grasp_{grasp_id}.npz"
    label = np.load(label_file_path)

    # Stored quaternion is [x, y, z, w]; the FK layer wants [w, x, y, z].
    palm_q = label['palm_q'][[3,0,1,2]]
    base_pose = np.concatenate([label['palm_t'], palm_q], axis=0)

    joint_angles = torch.FloatTensor(label['joints'][2:24]).reshape(1, -1).cuda()
    base_tensor = torch.FloatTensor(base_pose).reshape(1, -1).cuda()

    fk = FK_layer(base_tensor, joint_angles)
    fk.to('cuda')
    key_pts, hand_pts = fk()
    # Undo the label's object normalization so hand points are in the
    # object's normalized frame.
    hand_pts /= 0.5 * label["obj_scale"]

    # Ground-truth DIF latent code of the source instance.
    with open(os.path.join(opt.data_root, 'gt_codes', '{0}_{1}.pkl'.format(category, 'train')), 'rb') as f:
        codes = pickle.load(f)
    code = torch.from_numpy(codes[f'{src_inst}/0']).cuda()

    hand_pts_template = dif_model.get_template_coords(hand_pts, code)  # [1, n_hand_pts, 3]
    hand_pts_sdf = dif_model.get_template_field(hand_pts_template)     # [1, n_hand_pts, 1]

    # Restrict to points inside the unit ball, then split by template SDF:
    # |sdf| < 5e-3 -> contact, sdf > 5e-2 -> clearly off-surface.
    inside = torch.norm(hand_pts_template, dim=2) < 0.99
    contact_mask = (torch.abs(hand_pts_sdf[:, :, 0]) < 5e-3) & inside
    free_mask = (hand_pts_sdf[:, :, 0] > 5e-2) & inside

    contact_index = torch.where(contact_mask)[1]
    non_contact_index = torch.where(free_mask)[1]
    contact_pts_template = hand_pts_template[:, contact_index]

    contact_index = contact_index.detach().cpu().numpy()
    non_contact_index = non_contact_index.detach().cpu().numpy()

    contact_info_file_path = f"grasp_label/{category}/train/{src_inst}/0/label_grasp_{grasp_id}_contact_info"
    np.savez(contact_info_file_path, contact_idx=contact_index, non_contact_idx=non_contact_index)
    return contact_pts_template, label

def get_query_points(N=64):
    """Return a dense (N**3, 3) grid of query points spanning [-1, 1]^3.

    The flat index is decomposed so that z (column 2) varies fastest, then
    y, then x — matching the DeepSDF-style sampling used by the DIF model.

    Args:
        N: samples per axis (grid resolution); spacing is 2 / (N - 1).

    Returns:
        torch.FloatTensor of shape (N**3, 3) with requires_grad disabled.
    """
    voxel_origin = [-1.0, -1.0, -1.0]  # minimum corner of the grid
    voxel_size = 2.0 / (N - 1)         # spacing so samples cover [-1, 1] inclusive

    overall_index = torch.arange(N ** 3, dtype=torch.long)
    samples = torch.zeros(N ** 3, 3)

    # Decompose flat index into (x, y, z) voxel indices; z varies fastest.
    samples[:, 2] = overall_index % N
    samples[:, 1] = (overall_index // N) % N
    samples[:, 0] = (overall_index // N) // N

    # Convert voxel indices to world coordinates. Fixed: the original code
    # indexed voxel_origin in reverse order ([2] on x, [0] on z) — harmless
    # only because all components are equal, but wrong if the origin ever
    # becomes anisotropic.
    samples[:, 0] = samples[:, 0] * voxel_size + voxel_origin[0]
    samples[:, 1] = samples[:, 1] * voxel_size + voxel_origin[1]
    samples[:, 2] = samples[:, 2] * voxel_size + voxel_origin[2]

    samples.requires_grad = False
    return samples

if __name__ == '__main__':
    # Pipeline: (1) estimate object pose/scale in the camera frame via
    # DeformNet + similarity fit, (2) regress a DIF shape code from the
    # normalized cloud, (3) deform query points into the shared template
    # space, (4) transfer labeled grasp points by nearest-neighbor search,
    # (5) save per-instance grasp points and transforms.
    opt.dif_config = 'dif/configs/generate/{0}.yml'.format(opt.category)
    print(opt)

    # Categorical mean point cloud used as DeformNet's shape prior.
    mean_shapes = np.load('pose_estimator/assets/mean_points_emb.npy')
    CLASS_MAP_FOR_CATEGORY = {'bottle':1, 'bowl':2, 'camera':3, 'can':4, 'laptop':5, 'mug':6}
    prior = mean_shapes[CLASS_MAP_FOR_CATEGORY[opt.category]-1]
    prior = torch.from_numpy(prior).float().cuda()
    
    dataset = PartPointsDatset(opt.data_root, opt.category, opt.mode, opt.num_points)
    dataloader = torch.utils.data.DataLoader(dataset, 
                                             batch_size=opt.batch_size, 
                                             shuffle=False, 
                                             num_workers=8, 
                                             pin_memory=True)

    # Pose/deformation network; '' falls back to the per-category checkpoint.
    pose_net = DeformNet()
    if opt.pose_model_path == '':
        opt.pose_model_path = f'pose_estimator/output/{opt.category}/checkpoints/model.pth'
    pose_net.load_state_dict(torch.load(opt.pose_model_path))
    pose_net.eval()
    pose_net.cuda()
    
    # Encoder mapping a normalized object point cloud to a DIF latent code.
    shape_encoder = ShapeEncoder()
    if opt.shape_model_path == '':
        opt.shape_model_path = f'shape_encoder/output/{opt.category}/checkpoints/model_best.pth'
    shape_encoder.load_state_dict(torch.load(opt.shape_model_path))
    shape_encoder.eval()
    shape_encoder.cuda()
    
    # Deformed implicit field: maps between instance and template space.
    with open(os.path.join(opt.dif_config),'r') as stream: 
        meta_params = yaml.safe_load(stream)
    dif_model = DeformedImplicitField(**meta_params)
    dif_model.load_state_dict(torch.load(meta_params['checkpoint_path']))
    dif_model.cuda()

    # transfer grasp points from labeled model to template field
    g_pts_temp, label = transfer_grasp_points_to_Template_Field(opt.category, dif_model, opt.grasp_id)
    g_pts_inst = np.zeros([g_pts_temp.size()[1], 3])
    
    # Dense query grid; in practice overwritten per-instance by mesh surface
    # points below (see the `if True` branch).
    q_pts = get_query_points(opt.resolution).cuda()[None, ...]

    cnt = 0
    for cam_pcs, gt_labels, file_names, numbers in dataloader:
        
        print(file_names)
        bs = cam_pcs.shape[0]
        cuda_cam_pcs = cam_pcs.cuda()
        # Deform the prior toward the observation and soft-assign observed
        # points to deformed model points to get NOCS-like coordinates.
        cuda_prior = prior.clone().view(1, 1024, 3).repeat(bs, 1, 1)
        assign_mat, deltas = pose_net(cuda_cam_pcs, cuda_prior)
        inst_shape = cuda_prior + deltas
        assign_mat = F.softmax(assign_mat, dim=2)
        f_coords = torch.bmm(assign_mat, inst_shape)  # bs x n_pts x 3
        f_coords = f_coords.detach().cpu().numpy()
        
        # i: invert transform
        cuda_pred_i_sR = torch.zeros([bs, 3, 3], dtype=torch.float32).cuda()
        cuda_pred_s = torch.zeros([bs], dtype=torch.float32).cuda()  # NOTE(review): never written or read afterwards — dead
        cuda_pred_t = torch.zeros([bs, 1, 3], dtype=torch.float32).cuda()

        i=0 # assume batch size is always 1

        # Similarity (Umeyama-style) fit between predicted coordinates and
        # the observed camera points; falls back to identity on failure.
        _, _, _, pred_sRT = estimateSimilarityTransform(f_coords[i], cam_pcs[i].numpy())
        if pred_sRT is None:
            pred_sRT = np.identity(4, dtype=float)
        # Isotropic scale recovered from det(sR) = s^3.
        s = np.cbrt(np.linalg.det(pred_sRT[:3, :3]))
        R = pred_sRT[:3, :3] / s
        i_sR = R / s  # rotation with inverse scale folded in; applied via bmm below
        cuda_pred_i_sR[i] = torch.from_numpy(i_sR).float().cuda()
        cuda_pred_t[i] = torch.from_numpy(pred_sRT[:3,3]).view(1,3).float().cuda()

        pred_trans = pred_sRT
        # NOTE(review): pred_trans aliases pred_sRT, so this in-place divide
        # also strips the scale from pred_sRT itself; harmless here because
        # pred_sRT is not used again after this point.
        pred_trans[:3, :3] /= s
        gt_trans = np.eye(4)
        gt_trans[:3,:3] = gt_labels['rotation'][i].numpy()
        gt_trans[:3,3] = gt_labels['translation'][i].numpy()
        # Residual transform between ground truth and (scale-free) prediction.
        trans_offset = np.linalg.inv(gt_trans) @ pred_trans

        # Map observed points into the normalized object frame:
        # (x - t) @ (R / s). Assumes the row-vector convention used by
        # estimateSimilarityTransform — TODO confirm against its docs.
        cuda_obj_pcs = torch.bmm(torch.add(cuda_cam_pcs, -cuda_pred_t), cuda_pred_i_sR)
        cuda_pred_codes = shape_encoder(cuda_obj_pcs)

        # !!!!!!!!!! save in 'eval_pc' folder
        save_dir = os.path.join("grasp_data", opt.category, opt.mode + '_pc', file_names[i])
        os.makedirs(save_dir, exist_ok=True)
        # if os.path.exists(os.path.join(save_dir, "grasp_{}_points_on_surface.npz".format(opt.grasp_id))):
        #     continue
        
        # Surface points of the instance mesh reconstructed from the
        # predicted code (marching cubes at opt.resolution).
        mesh_pts = create_mesh(dif_model, filename='', 
                               embedding=cuda_pred_codes[i], 
                               N=opt.resolution,
                               get_color=False)
            

        # TODO: query points be free points or surface points ?
        if True:
            # Currently always uses reconstructed surface points as queries,
            # replacing the dense grid prepared before the loop.
            q_pts = torch.FloatTensor(mesh_pts).cuda()[None, ...]

        q_pts_dfm = dif_model.get_template_coords(q_pts, cuda_pred_codes[i])
        print("grasp points on template size is :    {}.".format(g_pts_temp.size()))
        print("deformed points on instance size is : {}.".format(q_pts_dfm.size()))
        
        if opt.vis and cnt == 51:
            
            # Red: normalized input cloud; white: reconstructed surface;
            # green: surface points deformed into template space.
            input_pc = trimesh.PointCloud(cuda_obj_pcs[0].detach().cpu().numpy())
            input_pc.colors = np.array([255,0,0,255])
            pc = trimesh.PointCloud(mesh_pts)
            deform_pc = trimesh.PointCloud(q_pts_dfm[0].detach().cpu().numpy())
            deform_pc.colors = np.array([0,255,0,255])
            trimesh.Scene([input_pc, pc, deform_pc]).show()
        cnt += 1
                
        start_time = time.time()
        # Nearest-neighbor transfer: for each template grasp point, pick the
        # query point whose template-space image is closest, and record its
        # instance-space position.
        for j in range(g_pts_temp.size()[1]):
            index = torch.argmin(torch.norm(q_pts_dfm - g_pts_temp[0, j, :], dim=2), dim=1)
            g_pts_inst[j, :] = q_pts[0, index, :].detach().cpu().numpy()
        print("transfer one grasp on an instance used {} seconds.".format(time.time() - start_time))
        
        if not opt.vis:
            code = cuda_pred_codes[i].detach().cpu().numpy()
            # Ground-truth similarity transform of this rendered view, used
            # here only for its scale.
            gt_sRT = np.load(os.path.join(opt.data_root, 
                                          'render_pc', 
                                          opt.category, 
                                          opt.mode, 
                                          file_names[i], 
                                          'PC_cam_sRT_{0}.npz'.format(numbers[i])))
            obj_scale = gt_sRT['scale']
            # if opt.category == 'mug' and opt.grasp_id == '3':
            #     obj_scale *= 1.5 # gt scale
            #     s *= 1.5         # pred scale
            np.savez(os.path.join(save_dir, "grasp_{}_points_on_surface".format(opt.grasp_id)), 
                     grasp_points=g_pts_inst, 
                     pred_scale=s,
                     obj_scale=obj_scale,
                     trans_offset=trans_offset,
                     code=code)