# Here, we use a simple optimization method to refine the grasp pose.
from hashlib import new
from math import inf
from numpy.random.mtrand import sample
import torch
import torch.nn as nn
import numpy as np
import os
import yaml
import sys
import pickle
import configargparse
from tqdm import tqdm
import time
import logging
logger = logging.getLogger(__name__)
logger.setLevel(level = logging.INFO)

sys.path.append('./')
sys.path.append('./dif')
from utils import get_points_sdf_normal, get_template_points_sdf_normal


##########################################################
# Loss-term weights for the refinement objective.
# Used by PandaRefine.get_refine_loss when summing the per-grasp losses.
##########################################################
w_all = {'l_anti':100,      # antipodal alignment of the two contact normals
        'l_touch':20,       # contact points must lie on the surface (|sdf| -> 0)
        'l_collision':10,   # penalize gripper points penetrating the object
        'l_reg':1           # keep deltas small (stay near the initial grasp)
        }

# Weights used only to pick the single best grasp among collision-free ones.
# NOTE(review): 'l_depth' is listed here but never referenced when building
# loss_choose in get_refine_loss — confirm whether it was meant to be used.
w_all_choose_best = {'l_anti':100,
                    'l_touch':20,
                    'l_depth':0,
                    'l_reg':0
                    }

# Sub-weights inside the regularization term l_reg (see get_refine_loss).
w_reg = [100,  # w_reg_1: penalty on |delta left contact point|
        100,  # w_reg_2: penalty on |delta right contact point|
        100,  # w_reg_d: penalty on |delta depth|
        100   # w_reg_v: penalty on approach-vector rotation
        ]
##########################################################
##########################################################

def get_RTs_from_grasp_params_tensor(obj_left_points, obj_right_points, \
                                    vectors, depths, obj_scale):
    """
    Build batched 4x4 gripper poses from grasp parameters (tensor version).

    param:
        obj_left_points: (N, 3) left contact points in normalized object coords
        obj_right_points: (N, 3) right contact points in normalized object coords
        vectors: (N, 3) approach vectors (need not be orthogonal to the closing axis)
        depths: (N,) grasp depths along the re-orthogonalized approach axis
        obj_scale: scalar scale mapping normalized points back to object size
    return:
        (N, 4, 4) float32 tensor of homogeneous transforms on the inputs' device.
    """
    # Gripper closing axis: from left contact to right contact.
    hori = obj_right_points - obj_left_points
    assert (torch.norm(hori, dim=1) > 0).all(), "Warning! Only one point!!"
    hori = hori / torch.norm(hori, dim=1, keepdim=True)
    # Build an orthonormal frame: normal = hori x approach, then re-orthogonalize
    # the approach direction so (normal, hori, appro_correct) is a rotation.
    normal = torch.cross(hori, vectors, dim=1)
    normal = normal / torch.norm(normal, dim=1, keepdim=True)

    appro_correct = torch.cross(normal, hori, dim=1)

    # Allocate on the inputs' device instead of hard-coding .cuda(): identical
    # behavior for existing CUDA callers, and now also usable on CPU tensors.
    RTs = torch.eye(4, 4, device=obj_left_points.device).repeat(vectors.shape[0], 1, 1)
    # Columns of the rotation are (normal, hori, appro_correct).
    RTs[:, :3, :3] = torch.stack((normal, hori, appro_correct), dim=2)
    # Translation: scaled midpoint of the two contacts, pulled back along the
    # corrected approach axis by the grasp depth.
    RTs[:, :3, 3] = (obj_left_points + obj_right_points) * (obj_scale / 2) / 2
    RTs[:, :3, 3] -= appro_correct * depths.reshape(-1, 1)

    return RTs.float()

def generate_gripper_points(num_points=1000):
    """
    Sample `num_points` points along a stick-figure model of the Panda gripper.

    The model has four line segments: the approach stick, the horizontal bar,
    and the two fingers. Points are distributed across the segments in
    proportion to segment length.

    param:
        num_points: total number of points to generate
    return:
        (num_points, 3) float32 array of points in the gripper frame.
    """
    approach_len = 6.59999996e-02
    horizon_len = 2 * 4.100000e-02
    finger_len = 2 * (1.12169998e-01 - 6.59999996e-02)
    total_len = approach_len + horizon_len + finger_len

    # Allocate points proportionally to length; the approach segment absorbs
    # any rounding remainder so the counts sum to num_points exactly.
    n_horizon = int(horizon_len / total_len * num_points + 0.5)
    n_finger = int(finger_len / total_len * num_points / 2. + 0.5)
    n_approach = num_points - n_horizon - 2 * n_finger

    half_bar = horizon_len / 2.
    tip_z = approach_len + finger_len / 2.
    segments = (
        np.linspace([0, 0, 0], [0, 0, approach_len], n_approach),
        np.linspace([0, -half_bar, approach_len], [0, half_bar, approach_len], n_horizon),
        np.linspace([0, -half_bar, approach_len], [0, -half_bar, tip_z], n_finger),
        np.linspace([0, half_bar, approach_len], [0, half_bar, tip_z], n_finger),
    )
    return np.concatenate(segments, dtype=np.float32)

def get_mesh_sampled_points(gripper_name, num_points):
    """
    Load pre-sampled gripper mesh points and draw `num_points` of them
    uniformly without replacement.

    param:
        gripper_name: basename of the '.npy' point file under ./assets
        num_points: number of points to draw
    return:
        (num_points, D) array of sampled points (D as stored in the asset).
    raises:
        ValueError: if the asset contains fewer than `num_points` points.
    """
    root = os.path.dirname(os.path.abspath(__file__))
    pts = np.load('{}/assets/{}.npy'.format(root, gripper_name))
    # replace=False only requires len(pts) >= num_points, and a real exception
    # (unlike `assert`) still fires when running under `python -O`.
    if len(pts) < num_points:
        raise ValueError('please sample more points from mesh!')
    sample_index = np.random.choice(pts.shape[0], num_points, replace=False)
    return pts[sample_index]

class PandaRefine(nn.Module):
    """Refine a batch of Panda grasps by optimizing small per-grasp deltas.

    Each grasp gets a 10-D learnable offset stored as one row of an
    ``nn.Embedding``: 3 values for the left contact point, 3 for the right
    contact point, 1 for the grasp depth and 3 for the approach vector.
    ``forward`` returns the refinement losses for the current deltas, so an
    outer optimizer can update ``self.emb`` directly.
    """
    def __init__(self, grasp_info, obj_scale, shape_code=None):
        """
        param:
            grasp_info: dict with key 'grasp_params' -- either one grasp dict
                or a list of them; each grasp dict carries 'left_points',
                'right_points', 'depth' and 'approach_vector'
            obj_scale: object scale used to map normalized coords to object size
            shape_code: optional latent shape code; when None the template SDF
                model is queried instead of the instance-conditioned one
        """
        super(PandaRefine, self).__init__()

        self.grasp_info = grasp_info
        self.grasp_params = grasp_info['grasp_params']
        self.obj_scale = obj_scale
        self.shape_code = shape_code
        # Normalize a single grasp dict into a one-element list.
        if type(self.grasp_params) is dict:
            self.grasp_params = [self.grasp_params]
        self.num_grasps = len(self.grasp_params)
        # One 10-D delta row per grasp: [dp1(3), dp2(3), d_depth(1), d_vec(3)].
        self.emb = nn.Embedding(self.num_grasps, 10)

        self.best_anti = 0
        self.best_touch = 0
        self.best_collision = 0

        # Start from the unrefined grasps: all deltas zero.
        nn.init.zeros_(self.emb.weight)

    def forward(self, model, choose=True):
        """Evaluate all refinement losses for the current deltas."""
        losses = self.get_refine_loss(model, choose)
        return losses

    def get_refine_loss(self, model, choose=True):
        """
        Compute the antipodal / touch / collision / regularization losses.

        param:
            model: SDF network queried through get_points_sdf_normal /
                get_template_points_sdf_normal
            choose: NOTE(review): currently unused inside this method
        return:
            dict with mean scalar losses ('loss', 'l_anti', 'l_touch',
            'l_collision', 'l_reg', 'l_depth'), plus 'best_idx' (index of the
            selected grasp) and 'loss_choose' (selection loss of that grasp).
        """
        deltas = self.emb.weight
        obj_scale = self.obj_scale
        shape_code = self.shape_code

        grasp_num = len(self.grasp_params)
        # Split each 10-D delta row into its four components.
        delta_p1 = deltas[:, :3]
        delta_p2 = deltas[:, 3:6]
        delta_depth = deltas[:, 6]
        delta_vector = deltas[:, 7:]

        obj_left_points = []
        obj_right_points = []
        depths = []
        vectors = []

        # Transfer grasp_params to tensor:
        for grasp_param in self.grasp_params:
            obj_left_points.append(grasp_param['left_points'])
            obj_right_points.append(grasp_param['right_points'])
            depths.append(grasp_param['depth'])
            vectors.append(grasp_param['approach_vector'])

        obj_left_points = np.array(obj_left_points, dtype=np.float32)
        obj_right_points = np.array(obj_right_points, dtype=np.float32)
        depths = np.array(depths, dtype=np.float32)
        vectors = np.array(vectors, dtype=np.float32)

        obj_left_points = torch.from_numpy(obj_left_points).cuda()
        obj_right_points = torch.from_numpy(obj_right_points).cuda()
        vectors = torch.from_numpy(vectors).cuda()
        depths = torch.from_numpy(depths).cuda()

        # Add the learnable deltas (gradients flow into self.emb from here on):
        obj_left_points = obj_left_points + delta_p1
        obj_right_points = obj_right_points + delta_p2
        vectors_dt = vectors + delta_vector
        depths = depths + delta_depth

        # Get RT from grasp_params:
        RTs = get_RTs_from_grasp_params_tensor(obj_left_points, obj_right_points, vectors_dt, depths, obj_scale)

        ##########################################################
        ##########################l_anti##########################
        ##########################################################
        # Antipodal rule: the surface normal at each contact should point
        # toward the opposite contact; l_anti is zero when both are aligned.

        if shape_code is None:
            lp_sdfs, lp_normals = get_template_points_sdf_normal(model, obj_left_points)
            rp_sdfs, rp_normals = get_template_points_sdf_normal(model, obj_right_points)
        else:
            lp_sdfs, lp_normals = get_points_sdf_normal(model, shape_code, obj_left_points)
            rp_sdfs, rp_normals = get_points_sdf_normal(model, shape_code, obj_right_points)
        lp_normals = lp_normals / torch.norm(lp_normals, dim=1, keepdim=True)
        rp_normals = rp_normals / torch.norm(rp_normals, dim=1, keepdim=True)
        vec_r2l = obj_left_points - obj_right_points
        vec_r2l = vec_r2l / torch.norm(vec_r2l, dim=1, keepdim=True)
        vec_l2r = obj_right_points - obj_left_points
        vec_l2r = vec_l2r / torch.norm(vec_l2r, dim=1, keepdim=True)

        # Each dot product is in [-1, 1]; the +2 shifts l_anti into [0, 4].
        l_anti = -torch.sum(lp_normals * vec_r2l, dim=1) - torch.sum(rp_normals * vec_l2r, dim=1)
        l_anti += 2.

        ##########################################################
        ##########################l_touch#########################
        ##########################################################
        # Contacts should sit on the surface, i.e. their |sdf| should be zero.
        l_touch = torch.norm(lp_sdfs, dim=1) + torch.norm(rp_sdfs, dim=1)


        ##########################################################
        ########################l_collision#######################
        ##########################################################
        # SDF values of the gripper's points: negative values mean penetration.
        # NOTE(review): the gripper point cloud is re-loaded from disk on every
        # forward call — consider caching it in __init__.
        num_grasp_points = 150
        # gripper_points = generate_gripper_points(num_grasp_points)
        gripper_points = get_mesh_sampled_points('thin_hand',num_grasp_points)
        gripper_points = torch.from_numpy(gripper_points).float().cuda()
        gripper_points = gripper_points.view(1, num_grasp_points, 3).repeat(grasp_num, 1, 1)
        # Transform gripper points into object coordinates with each grasp pose,
        # then rescale; presumably this maps back into the SDF model's
        # normalized frame — TODO confirm against the SDF training convention.
        gripper_points_in_objcoords = torch.bmm(gripper_points, RTs[:,:3,:3].transpose(1,2)) + RTs[:,:3,3].unsqueeze(1)
        gripper_points_in_objcoords = gripper_points_in_objcoords.view(-1,3) / (obj_scale /2.)
        
        if shape_code is None:
            gripper_sdfs, _ = get_template_points_sdf_normal(model, gripper_points_in_objcoords)
        else:
            gripper_sdfs, _ = get_points_sdf_normal(model, shape_code, gripper_points_in_objcoords)
        # Keep only penetrating points (sdf < 0), flipped into a positive penalty.
        gripper_sdfs = -torch.where(gripper_sdfs>0, torch.zeros_like(gripper_sdfs), gripper_sdfs)
        l_collision = torch.sum(gripper_sdfs.view(grasp_num, num_grasp_points), dim=1)


        ##########################################################
        ##########################l_reg###########################
        ##########################################################
        # Rotation penalty on the approach vector: 1 - cos(angle) mapped to
        # [0, 1] (0 when the refined vector keeps its original direction).
        # l_reg_v = torch.norm(delta_vector, dim=1)
        # l_reg_v = -torch.sum(vectors*(vectors+delta_vector), dim=1) / (torch.norm(vectors, dim=1) * torch.norm(vectors+delta_vector, dim=1))
        l_reg_v = -torch.sum(vectors*vectors_dt, dim=1) / (torch.norm(vectors, dim=1) * torch.norm(vectors_dt, dim=1))
        l_reg_v = l_reg_v/2.+0.5
        l_reg = w_reg[0]*torch.norm(delta_p1, dim=1) \
            + w_reg[1]*torch.norm(delta_p2, dim=1) \
            + w_reg[2]*torch.abs(delta_depth) \
            + w_reg[3]*l_reg_v

        ##########################################################
        #######################loss_total#########################
        ##########################################################
        loss_total = w_all['l_anti']*l_anti + w_all['l_touch']*l_touch + w_all['l_collision']*l_collision + w_all['l_reg']*l_reg


        ##########################################################
        # Refined depths are reported directly as 'l_depth' (not optimized here).
        l_depth = depths


        # Indices of grasps whose gripper points have zero penetration.
        no_collision_index = torch.where(l_collision == 0)[0] # torch.where returns a tuple of index tensors

        # If at least one grasp is collision-free, pick the best among those
        # using the selection weights; otherwise fall back to loss_total.
        if min(no_collision_index.shape) != 0:
            loss_choose = w_all_choose_best['l_anti']*l_anti[no_collision_index] \
                + w_all_choose_best['l_touch']*l_touch[no_collision_index] \
                + w_all_choose_best['l_reg']*l_reg[no_collision_index]
            _idx = torch.argmin(loss_choose)
            loss_choose = loss_choose[_idx]
            best_idx = no_collision_index[_idx]
        else:
            print(no_collision_index.shape)
            print("[Refine Grasp] All grasp collision! Choose best one according to \'loss_total\'...")
            _idx = torch.argmin(loss_total)
            loss_choose = loss_total[_idx]
            best_idx = _idx

        # Reduce per-grasp losses to batch means for logging / backprop.
        loss_total = loss_total.mean()
        l_anti = l_anti.mean()
        l_touch = l_touch.mean()
        l_collision = l_collision.mean()
        l_reg = l_reg.mean()

        return {'loss':loss_total,
                'l_touch':l_touch,
                'loss_choose':loss_choose,
                'l_anti':l_anti,
                'l_collision':l_collision,
                'l_reg':l_reg,
                'l_depth':l_depth.mean(),
                'best_idx': best_idx,
                }

def get_results_from_pth(grasp_info, deltas):
    """
    Apply refined embedding deltas from a checkpoint state-dict back onto the
    grasp parameters, in place.

    param:
        grasp_info: dict whose 'grasp_params' is one grasp dict or a list of them
        deltas: state-dict containing 'emb.weight' of shape (num_grasps, 10)
    return:
        grasp_info, with each grasp's points/depth/vector shifted by its delta row.
    """
    weights = deltas['emb.weight'].data.cpu().numpy()

    params = grasp_info['grasp_params']
    # A lone grasp dict consumes delta row 0; a list consumes row i per grasp.
    param_list = [params] if type(params) is dict else params
    for idx, grasp_param in enumerate(param_list):
        row = weights[idx]
        grasp_param['left_points'] += row[:3]
        grasp_param['right_points'] += row[3:6]
        grasp_param['depth'] += row[6]
        grasp_param['approach_vector'] += row[7:]
    return grasp_info

def get_best_results_from_pth(grasp_info, deltas, best_idx):
    """
    Keep only the grasp at `best_idx` (when 'grasp_params' is a list) and apply
    its refined delta in place.

    param:
        grasp_info: dict whose 'grasp_params' is one grasp dict or a list of them
        deltas: state-dict containing 'emb.weight' of shape (num_grasps, 10)
        best_idx: row of the delta table (and list index) of the chosen grasp
    return:
        grasp_info, with 'grasp_params' reduced to the single refined grasp dict.
    """
    best_delta = deltas['emb.weight'].data.cpu().numpy()[best_idx]

    # Collapse a list of grasps down to the selected one; a lone dict stays.
    if type(grasp_info['grasp_params']) is not dict:
        grasp_info['grasp_params'] = grasp_info['grasp_params'][best_idx]

    grasp_param = grasp_info['grasp_params']
    grasp_param['left_points'] += best_delta[:3]
    grasp_param['right_points'] += best_delta[3:6]
    grasp_param['depth'] += best_delta[6]
    grasp_param['approach_vector'] += best_delta[7:]
    return grasp_info

def choose_best_results_with_index(grasp_info, best_idx):
    """
    Reduce a multi-grasp grasp_info to the single grasp at `best_idx`.
    A grasp_info already holding one grasp dict is returned unchanged.
    """
    params = grasp_info['grasp_params']
    if type(params) is not dict:
        grasp_info['grasp_params'] = params[best_idx]
    return grasp_info