import pickle, argparse, os, yaml, sys
import numpy as np
import torch.nn.functional as F
import trimesh
import torch
from dif.dif_net import DeformedImplicitField
from shape_encoder.model import ShapeEncoder
from dif.sdf_meshing import create_mesh
import open3d as o3d
import copy

def get_args():
    """Parse the command-line options for this debug script."""
    ap = argparse.ArgumentParser()
    # for debug
    ap.add_argument('--data_dir', type=str, default='rs_imgs')
    ap.add_argument('--img_num', type=int, default=17)
    ap.add_argument('--pkl_root', type=str, default='panda_grasp_data')
    ap.add_argument('--resolution', type=int, default=64)
    ap.add_argument('--vis', action="store_true", default=False)
    # NOTE(review): store_true with default=True can never be switched off
    # from the CLI; kept as-is for compatibility.
    ap.add_argument("--global_feat", action='store_true', default=True) # useless options
    ap.add_argument("--feat_trans", action='store_true', default=False)
    ap.add_argument("--bn", action='store_true', default=False)
    return ap.parse_args()


def q2R(pred_r):
    """Convert a batch of (w, x, y, z) quaternions to rotation matrices.

    The input is normalized internally, so non-unit quaternions are fine.

    Args:
        pred_r: tensor of shape (bs, 4), quaternion per row, scalar first.

    Returns:
        Tensor of shape (bs, 3, 3) with the corresponding rotation matrices.
    """
    bs = pred_r.shape[0]
    quat = pred_r / (torch.norm(pred_r, dim=1).view(bs, 1))
    w, x, y, z = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]
    # Rows of the standard scalar-first quaternion rotation matrix.
    row0 = torch.stack((1.0 - 2.0*(y**2 + z**2),
                        2.0*x*y - 2.0*w*z,
                        2.0*w*y + 2.0*x*z), dim=1)
    row1 = torch.stack((2.0*x*y + 2.0*z*w,
                        1.0 - 2.0*(x**2 + z**2),
                        -2.0*w*x + 2.0*y*z), dim=1)
    row2 = torch.stack((-2.0*w*y + 2.0*x*z,
                        2.0*w*x + 2.0*y*z,
                        1.0 - 2.0*(x**2 + y**2)), dim=1)
    return torch.stack((row0, row1, row2), dim=1)

def load_model_encoder(root, cate):
    """Load the category-specific shape encoder checkpoint onto the GPU.

    Mugs use a '<cate>_new.pth' checkpoint; every other category uses
    '<cate>.pth'. The model is returned in eval mode on CUDA.
    """
    encoder = ShapeEncoder()
    ckpt_rel_path = ('shape_encoder/checkpoints/{}_new.pth'.format(cate)
                     if cate == 'mug'
                     else 'shape_encoder/checkpoints/{}.pth'.format(cate))
    encoder.load_state_dict(torch.load(os.path.join(root, ckpt_rel_path)))
    encoder.cuda()
    encoder.eval()
    return encoder

def load_model_dif(root, meta_params):
    """Build DIF-Net from meta_params and load its checkpoint onto the GPU.

    meta_params supplies both the constructor kwargs and
    'checkpoint_path' (relative to root).
    """
    dif_net = DeformedImplicitField(**meta_params)
    state = torch.load(os.path.join(root, meta_params['checkpoint_path']))
    dif_net.load_state_dict(state)
    return dif_net.cuda()

def load_grasp_source(pkl_root, cate, cfg):
    """Load template grasp poses for a category and filter them by cfg.

    Args:
        pkl_root: directory containing '<cate>.pkl' grasp template files.
        cate: object category name ('mug', 'bottle' or 'bowl').
        cfg: dict with keys 'type' (category-specific filter mode) and
            'max_num' (upper bound on the number of grasps kept; when
            exceeded, grasps are randomly subsampled).

    Returns:
        A deep copy of the template dict with 'grasp_params' replaced by
        the filtered grasp list (an ndarray of dicts when subsampled).

    Raises:
        ValueError: for an unsupported category or cfg['type'].
    """
    def filter_grasp(panda_grasp_params, cate, cfg):
        '''
        remove some unreasonable grasp poses, eg. grasp pose from down to up.

        Specifically:
            Mug: remove from down to up, that is gripper z axis and mug y axis are at acute angles.
                 remove grasp mug handle.
                 In other words, only save from up to down grasp, and grasp mug body.
            Bottle: remove grasp bottle head. concentrate on grasping center of body.
            Bowl: pass.
        '''
        filtered_grasps = []
        if cate == 'mug':
            if cfg['type'] == 'all':
                filtered_grasps = panda_grasp_params['grasp_params']
            elif cfg['type'] == 'rim': # most grasp poses around rim of mug are from top to down
                for g in panda_grasp_params['grasp_params']:
                    # approach within 30 deg of straight down (-y in canonical frame)
                    if np.sum(g['approach_vector']*np.array([0,1,0])) < -np.cos(30/180 * np.pi): 
                        filtered_grasps.append(g)
            elif cfg['type'] == 'handle':
                for g in panda_grasp_params['grasp_params']:
                    # gripper closing direction (left contact -> right contact)
                    hori = g['left_points'] - g['right_points']
                    hori = hori / np.linalg.norm(hori)
                    # approach 15-45 deg from straight down, closing axis nearly
                    # aligned with z, and both contacts on the handle side (x > 0.1)
                    if np.sum(g['approach_vector']*np.array([0,1,0])) < -np.cos(45/180 * np.pi) and \
                       np.sum(g['approach_vector']*np.array([0,1,0])) > -np.cos(15/180 * np.pi) and \
                        np.abs(np.sum(hori*np.array([0,0,1]))) > np.cos(10/180 * np.pi) \
                            and g['left_points'][0] > 0.1 and g['right_points'][0] > 0.1:
                        filtered_grasps.append(g)
            elif cfg['type'] == 'body':
                pass
            else:
                raise ValueError('No type \'{}\' in grasp source of \'{}\'!'.format(cfg['type'], cate))
        elif cate == 'bottle':
            if cfg['type'] == 'all':
                filtered_grasps = panda_grasp_params['grasp_params']
            elif cfg['type'] == 'body':
                for g in panda_grasp_params['grasp_params']:
                    # keep grasps whose center height lies on the lower body
                    grasp_center_y = (g['left_points'][1] + g['right_points'][1])/2
                    if grasp_center_y >=0 and grasp_center_y<0.2:
                        filtered_grasps.append(g)
            elif cfg['type'] == 'cap':
                for g in panda_grasp_params['grasp_params']:
                    grasp_center_y = (g['left_points'][1] + g['right_points'][1])/2
                    if grasp_center_y > 0.5:
                        filtered_grasps.append(g)
            else:
                raise ValueError('No type \'{}\' in grasp source of \'{}\'!'.format(cfg['type'], cate))
        elif cate == 'bowl':
            if cfg['type'] == 'all':
                filtered_grasps = panda_grasp_params['grasp_params']
            else:
                raise ValueError('No type \'{}\' in grasp source of \'{}\'!'.format(cfg['type'], cate))
        else:
            raise ValueError('Category {} is not implemented!'.format(cate))

        filtered_panda_grasp_params = copy.deepcopy(panda_grasp_params)
        if len(filtered_grasps) > cfg['max_num']:
            # Randomly subsample down to max_num grasps (nondeterministic).
            filtered_grasps = np.random.choice(filtered_grasps, size=cfg['max_num'], replace=False)
        filtered_panda_grasp_params['grasp_params'] = filtered_grasps

        return filtered_panda_grasp_params

    # Bugfix: .format previously received a spurious third argument (cfg).
    temp_grasp_path = '{}/{}.pkl'.format(pkl_root, cate)
    # NOTE: pickle.load is unsafe on untrusted files; template pickles are
    # assumed to come from this project's own data generation.
    with open(temp_grasp_path, 'rb') as f:
        panda_file_info_temp = pickle.load(f)

    # filter grasp params
    panda_file_info_temp = filter_grasp(panda_file_info_temp, cate, cfg)
    print('[Load Grasp Sources] category \'{}\' load grasp source, including {} grasps.'.format(cate, len(panda_file_info_temp['grasp_params'])))
    return panda_file_info_temp

def transfer_grasp(pose_results, shape_encoder, dif_model, panda_file_info_temp, icp=False, resolution=64, show_recon=False):
    """Transfer template grasp poses onto an observed object instance.

    Encodes the observed point cloud into a latent shape code, reconstructs
    the instance mesh with DIF-Net, maps the mesh vertices into the shared
    template space, and snaps each template grasp contact to the nearest
    instance vertex.

    Args:
        pose_results: dict with 'obj_pcs' (N,3 array), 'category', 'pred_s',
            and -- when icp=True -- 'cam_pcs' and 'sRT'.
        shape_encoder: network mapping a (1,N,3) point cloud to shape codes.
        dif_model: DIF-Net used for meshing and template-space mapping.
        panda_file_info_temp: template grasp source (see load_grasp_source).
        icp: refine the predicted pose with point-to-point ICP against 'cam_pcs';
            writes 'pred_s' and 'RT_icp' back into pose_results.
        resolution: marching-cubes grid resolution for mesh extraction.
        show_recon: visualize the reconstruction instead of transferring grasps.

    Returns:
        (panda_file_info_ins, pose_results). panda_file_info_ins is None when
        show_recon=True (previously this raised UnboundLocalError).
    """
    obj_pcs = pose_results['obj_pcs']
    assert obj_pcs.shape[1] == 3, 'Input \'Obj_pcs\' shape must be [N,3]!'
    cuda_obj_pcs = torch.from_numpy(obj_pcs).unsqueeze(0).float().cuda()

    # Latent shape code for the observed instance.
    cuda_pred_codes = shape_encoder(cuda_obj_pcs)

    i = 0  # batch of one: only the first code is used
    mesh_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'recon_meshes', '{}_mesh'.format(pose_results['category']))
    arr_mesh_points = create_mesh(dif_model, filename=mesh_path, embedding=cuda_pred_codes[i], N=resolution)

    if icp:
        # Per-category ICP correspondence distance thresholds.
        icp_thres = {'bottle':0.005, 'bowl':0.001, 'camera':0.03, 'can':0.005, 'laptop':0.005, 'mug':0.01}
        pc_cam = o3d.geometry.PointCloud()
        pc_cam.points = o3d.utility.Vector3dVector(pose_results['cam_pcs'])
        pc_obj = o3d.geometry.PointCloud()
        pc_obj.points = o3d.utility.Vector3dVector(arr_mesh_points)
        sRT = copy.deepcopy(pose_results['sRT'])

        # The reconstructed mesh is at twice the canonical scale, so halve the
        # rotation/scale block before registering against the camera cloud.
        sRT[:3, :3] = sRT[:3, :3] * 0.5
        trans_method = o3d.pipelines.registration.TransformationEstimationPointToPoint()
        trans_method.with_scaling = True  # estimate scale together with R, t
        reg_p2p = o3d.pipelines.registration.registration_icp(
                pc_obj, pc_cam, icp_thres[pose_results['category']], sRT,
                trans_method,
                o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=100))
        # Open3D returns a read-only transform; copy before mutating.
        sRT = copy.deepcopy(reg_p2p.transformation)
        sRT[:3, :3] = sRT[:3, :3] * 2  # undo the 0.5 pre-scaling
        s = np.cbrt(np.linalg.det(sRT[:3, :3]))  # recover isotropic scale
        pose_results['pred_s'] = s
        RT = sRT
        RT[:3,:3] = RT[:3,:3] / s  # leave a pure rotation in RT
        pose_results['RT_icp'] = RT

    # Bugfix: previously left undefined (UnboundLocalError at return) when
    # show_recon=True.
    panda_file_info_ins = None
    if show_recon:
        pc = trimesh.PointCloud(cuda_obj_pcs[i].detach().cpu().numpy(), colors=[0x98, 0x9F, 0xD9, 211])
        mesh = trimesh.load(mesh_path + '.ply')
        trans = np.eye(4)
        mesh.apply_scale(0.5).apply_transform(trans)
        trimesh.Scene([pc, mesh]).show()
    else:
        tensor_inst_points = torch.FloatTensor(arr_mesh_points).cuda()
        # Map instance-space mesh vertices into the shared template space.
        tensor_deform_points = dif_model.get_template_coords(tensor_inst_points, cuda_pred_codes[i])
        deform_points = tensor_deform_points.squeeze().detach().cpu().numpy().astype(np.float32)

        panda_file_info_ins = {'pred_s': pose_results['pred_s'],
                               'code': cuda_pred_codes[i].detach().cpu().numpy(),
                               'grasp_params':[]}
        for grasp_info in panda_file_info_temp['grasp_params']:
            # Snap each template contact to the nearest template-space vertex.
            index_left = np.argmin(np.linalg.norm(deform_points - grasp_info['left_points'], axis=1))
            index_right = np.argmin(np.linalg.norm(deform_points - grasp_info['right_points'], axis=1))
            if index_left == index_right:
                continue  # degenerate: both fingers collapse onto one vertex
            panda_file_info_ins['grasp_params'].append({
                'left_points': arr_mesh_points[index_left],
                'right_points': arr_mesh_points[index_right],
                'approach_vector': grasp_info['approach_vector'],
                'depth': grasp_info['depth'],
            })

    return panda_file_info_ins, pose_results

def generate_mug_body_grasp(pose_results, shape_encoder, dif_model, panda_file_info_temp, icp=False, resolution=64, show_recon=False):
    """Generate a single heuristic side grasp on a mug body.

    Unlike transfer_grasp, no template grasps are transferred: the instance
    mesh is reconstructed and one hard-coded grasp spanning the mesh's z
    extent is emitted. `panda_file_info_temp` is accepted only for signature
    compatibility with transfer_grasp and is unused.

    Note: the returned 'grasp_params' is a single dict here (not a list as
    in transfer_grasp) -- kept as-is for downstream compatibility. The mug
    ICP threshold (0.03) is deliberately looser than transfer_grasp's 0.01.

    Returns:
        (panda_file_info_ins, pose_results). panda_file_info_ins is None when
        show_recon=True (previously this raised UnboundLocalError).
    """
    obj_pcs = pose_results['obj_pcs']
    assert obj_pcs.shape[1] == 3, 'Input \'Obj_pcs\' shape must be [N,3]!'
    cuda_obj_pcs = torch.from_numpy(obj_pcs).unsqueeze(0).float().cuda()

    # Latent shape code for the observed instance.
    cuda_pred_codes = shape_encoder(cuda_obj_pcs)

    i = 0  # batch of one: only the first code is used
    mesh_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'recon_meshes', '{}_mesh'.format(pose_results['category']))
    arr_mesh_points = create_mesh(dif_model, filename=mesh_path, embedding=cuda_pred_codes[i], N=resolution)

    if icp:
        # Per-category ICP correspondence distance thresholds.
        icp_thres = {'bottle':0.005, 'bowl':0.001, 'camera':0.03, 'can':0.005, 'laptop':0.005, 'mug':0.03}
        pc_cam = o3d.geometry.PointCloud()
        pc_cam.points = o3d.utility.Vector3dVector(pose_results['cam_pcs'])
        pc_obj = o3d.geometry.PointCloud()
        pc_obj.points = o3d.utility.Vector3dVector(arr_mesh_points)
        sRT = copy.deepcopy(pose_results['sRT'])

        # The reconstructed mesh is at twice the canonical scale, so halve the
        # rotation/scale block before registering against the camera cloud.
        sRT[:3, :3] = sRT[:3, :3] * 0.5
        trans_method = o3d.pipelines.registration.TransformationEstimationPointToPoint()
        trans_method.with_scaling = True  # estimate scale together with R, t
        reg_p2p = o3d.pipelines.registration.registration_icp(
                pc_obj, pc_cam, icp_thres[pose_results['category']], sRT,
                trans_method,
                o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=100))
        # Open3D returns a read-only transform; copy before mutating.
        sRT = copy.deepcopy(reg_p2p.transformation)
        sRT[:3, :3] = sRT[:3, :3] * 2  # undo the 0.5 pre-scaling
        s = np.cbrt(np.linalg.det(sRT[:3, :3]))  # recover isotropic scale
        pose_results['pred_s'] = s
        RT = sRT
        RT[:3,:3] = RT[:3,:3] / s  # leave a pure rotation in RT
        pose_results['RT_icp'] = RT

    # Bugfix: previously left undefined (UnboundLocalError at return) when
    # show_recon=True.
    panda_file_info_ins = None
    if show_recon:
        pc = trimesh.PointCloud(cuda_obj_pcs[i].detach().cpu().numpy(), colors=[0x98, 0x9F, 0xD9, 211])
        mesh = trimesh.load(mesh_path + '.ply')
        trans = np.eye(4)
        mesh.apply_scale(0.5).apply_transform(trans)
        trimesh.Scene([pc, mesh]).show()
    else:
        # Hard-coded side grasp: fingers at the min/max z of the mesh, at a
        # fixed height, approaching along +x. (The template-space mapping and
        # per-template matching that transfer_grasp performs were dead code
        # here and have been removed.)
        min_z = np.amin(arr_mesh_points[:, 2])
        max_z = np.amax(arr_mesh_points[:, 2])
        panda_file_info_ins = {'pred_s': pose_results['pred_s'],
                               'code': cuda_pred_codes[i].detach().cpu().numpy(),
                               'grasp_params':{
                                                'left_points':np.array([0,0.01,min_z]),
                                                'right_points':np.array([0,0.01,max_z]),
                                                'approach_vector':np.array([1,0,0]),
                                                'depth':0.0,
                                              }
            }

    return panda_file_info_ins, pose_results

if __name__ == '__main__':

    # Debug driver: for each image's pose pickle, reconstruct the object with
    # DIF-Net and map the category's template grasps onto the instance.
    from utils import get_model
    opt = get_args()
    # Index -> category name; pose_results['pred_class_ids'] indexes this list.
    CLASS_MAP_FOR_CATEGORY = ['bottle', 'bowl', 'camera', 'can', 'laptop', 'mug']
    img_list = []
    for i in range(opt.img_num):
        img_list.append(str(i).zfill(4))  # image ids are zero-padded, e.g. '0003'

    shape_encoder = ShapeEncoder(opt.global_feat, opt.feat_trans, opt.bn)

    for img in img_list:
        
        # Per-image pose estimation results (object point clouds, class ids, ...).
        with open(os.path.join(opt.data_dir, img + '_pose.pkl'), 'rb') as f:
            pose_results = pickle.load(f)
        obj_pcs = pose_results['obj_pcs']
        bs = obj_pcs.shape[0]
        assert bs == 1  # the code below assumes one detected object per image
        cuda_obj_pcs = torch.from_numpy(obj_pcs).float().cuda()
        cate = CLASS_MAP_FOR_CATEGORY[pose_results['pred_class_ids'][0]]
        
        # Reload category-specific encoder weights into the shared encoder.
        opt.shape_model_path = 'shape_encoder/checkpoints/{}.pth'.format(cate)
        shape_encoder.load_state_dict(torch.load(opt.shape_model_path))
        shape_encoder.eval()
        shape_encoder.cuda()
        # Build the category-specific DIF-Net from its generation config.
        opt.dif_config = 'dif/configs/generate/{0}.yml'.format(cate)
        with open(os.path.join(opt.dif_config),'r') as stream: 
            meta_params = yaml.safe_load(stream)
        dif_model = get_model(meta_params)
        dif_model.cuda()
        
        # Latent shape codes for the observed instance(s).
        cuda_pred_codes = shape_encoder(cuda_obj_pcs)

        for i in range(bs):

            # Reconstruct the instance mesh from the shape code.
            arr_mesh_points = create_mesh(dif_model, filename='shape_encoder/mesh_holder', embedding=cuda_pred_codes[i], N=opt.resolution)
            if opt.vis:
                # Visualize the input cloud together with the reconstructed mesh.
                pc = trimesh.PointCloud(cuda_obj_pcs[i].detach().cpu().numpy(), colors=[0x98, 0x9F, 0xD9, 211])
                mesh = trimesh.load('shape_encoder/mesh_holder.ply')
                trans = np.eye(4)
                mesh.apply_scale(0.5).apply_transform(trans)
                trimesh.Scene([pc, mesh]).show()
            else:
                tensor_inst_points = torch.FloatTensor(arr_mesh_points).cuda()
                # Map instance-space mesh vertices into the shared template space.
                tensor_deform_points = dif_model.get_template_coords(tensor_inst_points, cuda_pred_codes[i])
                deform_points = tensor_deform_points.squeeze().detach().cpu().numpy().astype(np.float32)
                
                # Load the (unfiltered) template grasps for this category.
                temp_grasp_path = '{}/{}.pkl'.format(opt.pkl_root, cate)
                with open(temp_grasp_path, 'rb') as f:
                    panda_file_info_temp = pickle.load(f)

                panda_file_info_ins = {
                                    'code': cuda_pred_codes[i].detach().cpu().numpy(), 
                                    'grasp_params':[]}

                # Snap each template contact to the nearest template-space
                # vertex, then read the contacts back in instance space.
                for index, grasp_info in enumerate(panda_file_info_temp['grasp_params']):
                    index_left = np.argmin(np.linalg.norm(deform_points - grasp_info['left_points'], axis=1))
                    index_right = np.argmin(np.linalg.norm(deform_points - grasp_info['right_points'], axis=1))
                    if index_left == index_right:
                        continue  # degenerate: both fingers on one vertex
                    obj_left_points = arr_mesh_points[index_left]
                    obj_right_points = arr_mesh_points[index_right]
                    panda_file_info_ins['grasp_params'].append({
                        'left_points':obj_left_points,
                        'right_points':obj_right_points,
                        'approach_vector':grasp_info['approach_vector'],
                        'depth':grasp_info['depth'],
                    })

                #TODO: save grasp file, and refine grasp params