#! /home/handuser/miniconda3/envs/yjh/bin/python3
import os, sys, yaml
import rospy
import pickle
import torch
import numpy as np
import math
import copy
from sensor_msgs.msg import Image
from grasp_generation.srv import GraspGenerate, GraspGenerateResponse

HERE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(HERE_DIR, '../../utils/'))
from image_converter import convert_Image_to_nparray, convert_nparray_to_Image
import ros_np_multiarray as rosnm
sys.path.append(os.path.join(HERE_DIR, '../../objpose_estimation/'))
from scripts.estimator import ObjPoseEst

sys.path.append(os.path.join(HERE_DIR, '../../grasp_generation/transgrasp'))
sys.path.append(os.path.join(HERE_DIR, '../../grasp_generation/transgrasp/dif'))
from shape_encoder.model import ShapeEncoder
from dif.dif_net import DeformedImplicitField
from dif.sdf_meshing import create_mesh
from lib.fk.FK_layer_no_delta import ShadowLayer

sys.path.append(os.path.join(HERE_DIR, '../../grasp_generation/'))
from scripts.utils import *


class GraspGen:
    """End-to-end grasp-generation pipeline for a dexterous (Shadow) hand.

    Combines instance segmentation + category-level pose estimation
    (``ObjPoseEst``), a shape encoder (object point cloud -> latent code), a
    DIF decoder (latent code -> deformed implicit field / SDF) and an FK layer
    for the Shadow hand.  A grasp labelled on a category template object is
    transferred onto each detected instance and refined with gradient-based
    optimisation; results are published on ROS topics and/or returned through
    the ``/dexgrasp/grasp_server`` service.
    """

    def __init__(self, device) -> None:
        """Load all models and assets onto ``device`` (torch device or str)."""
        here = os.path.dirname(os.path.abspath(__file__))
        self.estimator = ObjPoseEst(device)
        self.device = device

        self.assets_dir = os.path.join(here, '../assets')
        self.transgrasp_dir = os.path.join(here, '../transgrasp')

        category = 'bottle'
        self.category = category
        # NOCS dataset class ids for the supported categories.
        category_id_map = {'bottle':0, 'bowl':1, 'mug':5}
        self.cat_id = category_id_map[self.category] # nocs id

        self.grasp_id = 13    # index of the labelled template grasp to transfer
        self.optim_step = 50  # iterations of grasp-refinement optimisation

        # shape encoder: object point cloud -> latent shape code
        shape_encoder = ShapeEncoder()
        encoder_model_path = os.path.join(self.assets_dir, f'{category}_model/model_best.pth')
        # map_location keeps loading robust to checkpoints saved on a different device
        shape_encoder.load_state_dict(torch.load(encoder_model_path, map_location=device))
        shape_encoder.eval()
        shape_encoder.to(device)
        self.encoder = shape_encoder
        # dif decoder: latent code -> deformed implicit field (SDF)
        dif_config = os.path.join(here, f'../transgrasp/dif/configs/generate/{category}.yml')
        with open(dif_config, 'r') as stream:
            meta_params = yaml.safe_load(stream)
        dif_model = DeformedImplicitField(**meta_params)
        decoder_model_path = os.path.join(self.assets_dir, f'{category}_model/model_final.pth')
        dif_model.load_state_dict(torch.load(decoder_model_path, map_location=device))
        dif_model.to(device)
        self.decoder = dif_model
        # FK_layer: (palm pose, joint angles) -> posed hand point cloud / mesh
        sampled_pts_path = os.path.join(here, f'../transgrasp/assets/sampled_pts_of_shadow_hand-add-tips.npy')
        hand_mesh_path = os.path.join(here, f'../transgrasp/assets/mjcf/open_ai_assets/stls/hand')
        self.fk = ShadowLayer(device, sampled_pts_path, hand_mesh_path)

    def serve_as_regular_publisher(self, rate_hz=1):
        """Run the full pipeline in a loop at ``rate_hz`` Hz, publishing every
        intermediate result (mask, pose, grasp markers, renderings).

        Blocks until ROS shutdown.
        """
        rospy.init_node('grasp_generation', anonymous=True)
        self.puber1 = rospy.Publisher('/dexgrasp/mask', Image, queue_size=1)
        self.puber2 = rospy.Publisher('/dexgrasp/pose', Image, queue_size=1)
        self.puber3 = rospy.Publisher('/dexgrasp/grasp', MarkerArray, queue_size=1)
        self.puber4 = rospy.Publisher('/dexgrasp/reconstruct', Image, queue_size=1)
        self.puber5 = rospy.Publisher('/dexgrasp/grasp_render', Image, queue_size=1)
        rate = rospy.Rate(rate_hz)
        while not rospy.is_shutdown():
            img_raw = rospy.wait_for_message("/camera/color/image_raw", Image)
            depth_raw = rospy.wait_for_message("/camera/aligned_depth_to_color/image_raw", Image)
            # stage 1: instance segmentation
            result_instseg, img_masked_msg = self.estimator.detector.run(img_raw)
            self.puber1.publish(img_masked_msg)
            # stage 2: category-level 6D pose estimation
            result_pose, img_posed_msg = self.estimator.run(img_raw, depth_raw, result_instseg)
            self.puber2.publish(img_posed_msg)
            # stage 3: grasp transfer + refinement
            result_grasp, grasp_markerarray_msg, img_rendered_obj_msg, img_rendered_hand_msg = self.run(img_raw, result_pose)
            self.puber3.publish(grasp_markerarray_msg)
            self.puber4.publish(img_rendered_obj_msg)
            self.puber5.publish(img_rendered_hand_msg)
            rate.sleep()

    def serve_as_server(self):
        """Expose the pipeline as ROS service ``/dexgrasp/grasp_server``.

        Debug images are still published on topics; the grasp markers go back
        inside the service response instead of a topic.  Blocks in spin().
        """
        rospy.init_node('grasp_generation', anonymous=True)
        self.puber1 = rospy.Publisher('/dexgrasp/mask', Image, queue_size=1)
        self.puber2 = rospy.Publisher('/dexgrasp/pose', Image, queue_size=1)
        # self.puber3 = rospy.Publisher('/dexgrasp/grasp', MarkerArray, queue_size=1)
        self.puber4 = rospy.Publisher('/dexgrasp/reconstruct', Image, queue_size=1)
        self.puber5 = rospy.Publisher('/dexgrasp/grasp_render', Image, queue_size=1)
        rospy.Service('/dexgrasp/grasp_server', GraspGenerate, self.full_pipeline_run_callback)
        rospy.wait_for_service('/dexgrasp/grasp_server')
        rospy.spin()

    def full_pipeline_run_callback(self, request):
        """Service callback: run segmentation -> pose -> grasp on the request's
        color/depth pair.

        Returns a ``GraspGenerateResponse``; its fields are left at their
        defaults (empty) when no grasp could be generated.
        """
        img_raw = request.color
        depth_raw = request.depth
        result_instseg, img_masked_msg = self.estimator.detector.run(img_raw)
        self.puber1.publish(img_masked_msg)
        result_pose, img_posed_msg = self.estimator.run(img_raw, depth_raw, result_instseg)
        self.puber2.publish(img_posed_msg)
        result_grasp, grasp_markerarray_msg, img_rendered_obj_msg, img_rendered_hand_msg = self.run(img_raw, result_pose)
        # self.puber3.publish(grasp_markerarray_msg)
        self.puber4.publish(img_rendered_obj_msg)
        self.puber5.publish(img_rendered_hand_msg)
        response = GraspGenerateResponse()
        if result_grasp is not None:
            response.q = result_grasp['q']
            response.t = result_grasp['t']
            response.obj_q = result_grasp['obj_q']
            response.obj_t = result_grasp['obj_t']
            response.theta = result_grasp['theta']
            response.grasp_marker = grasp_markerarray_msg
        return response

    def get_grasp_points_on_template(self, grasp_id):
        """Lift the labelled grasp ``grasp_id`` from the source instance into
        the category template space.

        Returns:
            contact_pts_template: [num contact pts, 3] tensor of hand points
                (template coordinates) lying on the object surface.
            contact_index / non_contact_index: numpy index arrays of hand
                points classified as in-contact / clearly off-surface.
            label: the raw npz grasp label (palm pose, joint angles, scale).
        """
        with open(os.path.join(self.transgrasp_dir, 'cfg/class_cfg.yml'),'r') as stream: 
            class_cfg = yaml.safe_load(stream)
                
        src_inst = class_cfg[self.category]['src_inst_name']
        
        label_file_path = os.path.join(self.assets_dir, f'grasp_label/{self.category}/train/{src_inst}/0/label_grasp_{grasp_id}.npz')
        label = np.load(label_file_path)
        palm_q = label['palm_q'] # [x,y,z,w]
        base = np.concatenate([label['palm_t'], palm_q], axis=0)
        rotations = label['joints'][2:24]
        # NOTE(review): was .cuda(); use the configured device for consistency
        rotations = torch.FloatTensor(rotations).reshape(1, -1).to(self.device)
        base      = torch.FloatTensor(base).reshape(1, -1).to(self.device)
        output = self.fk.get_posed_pc(base, rotations)
        hand_pts = output.verts
        # undo the normalisation scale the object had when the grasp was labelled
        hand_pts /= 0.5 * label["obj_scale"]
        # get dif's code of source object
        with open(os.path.join(self.assets_dir, f'gt_codes/{self.category}_train.pkl'), 'rb') as f:
            codes = pickle.load(f)
        code = torch.from_numpy(codes[f'{src_inst}/0']).to(self.device)
        hand_pts_template = self.decoder.get_template_coords(hand_pts, code) # [1, hand pts num, 3]
        hand_pts_sdf = self.decoder.get_template_field(hand_pts_template) # [1, hand pts num, 1]
        
        # only points inside the (near-)unit sphere have a meaningful template SDF
        inner_idx = torch.norm(hand_pts_template, dim=2)<0.99
        contact_index = torch.abs(hand_pts_sdf[:,:,0])<5e-3  # |sdf| ~ 0: touching surface
        non_contact_index = hand_pts_sdf[:,:,0]>5e-2         # clearly off the surface
        
        contact_index = contact_index & inner_idx
        non_contact_index = non_contact_index & inner_idx
        inner_index = torch.where(inner_idx == True)[1]
        contact_index = torch.where(contact_index == True)[1]
        non_contact_index = torch.where(non_contact_index == True)[1]
        contact_pts_template = hand_pts_template[0, contact_index]
        inner_index = inner_index.detach().cpu().numpy()
        contact_index = contact_index.detach().cpu().numpy()
        non_contact_index = non_contact_index.detach().cpu().numpy()
        
        # vis (debugging aid)
        # import trimesh
        # mesh = trimesh.load(f'grasp_generation/transgrasp/dif/recon/mug_expand_0_correct/train/template.ply')
        # mesh.visual.face_colors[:,3] = 160
        # pc = trimesh.PointCloud(hand_pts_template[0, contact_index].detach().cpu().numpy(), colors=np.array([255,0,0,255]))
        # pc_all = trimesh.PointCloud(hand_pts_template[0, inner_index].detach().cpu().numpy(), colors=np.array([0,255,0,255]))
        # pc_non = trimesh.PointCloud(hand_pts_template[0, non_contact_index].detach().cpu().numpy(), colors=np.array([0,0,255,255]))
        # trimesh.Scene([mesh, pc_all, pc, pc_non]).show()

        return contact_pts_template, contact_index, non_contact_index, label

    def run(self, image_raw:Image, result_pose):
        """Transfer the template grasp to every detected instance of
        ``self.category`` and refine it by optimisation.

        Args:
            image_raw: raw color image message (used for rendering overlays).
            result_pose: pose-estimation result dict from ``ObjPoseEst.run``
                (or None when pose estimation failed).

        Returns:
            (results_grasp dict | None, MarkerArray msg | None,
             rendered-object Image msg, rendered-hand Image msg).
        """
        if result_pose is None:
            return None, None, image_raw, image_raw
    
        img = convert_Image_to_nparray(image_raw)
        
        # array, maybe many objects are self.category
        idxs = np.where(result_pose['pred_class_ids'] == self.cat_id)[0]
        num_objects = idxs.shape[0]

        if num_objects == 0:
            # was (None, None, None, None): callers publish the last two values
            # as Image msgs, so return the raw image like the early exit above
            return None, None, image_raw, image_raw

        obj_pcs = result_pose['obj_pcs'][idxs]
        th_obj_pcs = torch.from_numpy(obj_pcs).to(self.device) # 'batch' dim
        pred_codes = self.encoder(th_obj_pcs)

        g_pts_temp, contact_idx, non_contact_idx, grasp_label = self.get_grasp_points_on_template(self.grasp_id)
        # contact points mapping: nearest-neighbour in template space, then read
        # back the corresponding point on each reconstructed instance mesh
        g_pts_inst = torch.zeros([num_objects, g_pts_temp.size(0), 3]).to(self.device)
        for i in range(num_objects):
            mesh_pts = create_mesh(self.decoder, filename=os.path.join(self.assets_dir, f'mesh_for_vis/{i}'), 
                                embedding=pred_codes[i], 
                                N=64,
                                get_color=False)
            q_pts = torch.FloatTensor(mesh_pts).to(self.device)[None, ...]
            q_pts_dfm = self.decoder.get_template_coords(q_pts, pred_codes[i])[0] # [0] remove 'batch' dim
            for j in range(g_pts_temp.size(0)):
                index = torch.argmin(torch.norm(q_pts_dfm - g_pts_temp[j], dim=1), dim=0)
                g_pts_inst[i, j] = q_pts[0, index]
        # scale targets into each object's metric size
        g_pts_inst = g_pts_inst * torch.FloatTensor(result_pose['pred_s'][idxs] * 0.5).to(self.device).view(num_objects, 1, 1)
        
        # grasp transfer: optimise palm translation + joint angles (orientation fixed)
        q = torch.FloatTensor(grasp_label['palm_q']).to(self.device) # [x,y,z,w]
        t = torch.FloatTensor(grasp_label['palm_t']).to(self.device)
        joint = torch.FloatTensor(grasp_label['joints'][2:24]).to(self.device)
        q = q.view(1, -1).repeat(num_objects, 1)
        t = t.view(1, -1).repeat(num_objects, 1)
        joint = joint.view(1, -1).repeat(num_objects, 1)
        t.requires_grad_()
        joint.requires_grad_()
        optim = torch.optim.Adamax([{'params': t, 'lr': 1e-4},
                                    {'params': joint, 'lr': 1e-2}])
        mseloss = torch.nn.MSELoss()
        
        obj_inv_scale = torch.FloatTensor(2. / result_pose['pred_s'][idxs]).to(self.device).view(num_objects, 1, 1)
        for i in range(self.optim_step):
            root = torch.cat([t, q], dim=1)
            out = self.fk.get_posed_pc(root, joint)
            pcs = out.verts
            key_pts = out.key_pts_pos
            
            # transfer loss: pull contact points towards their mapped targets
            sup_positions = pcs[:, contact_idx]
            l_transfer = mseloss(sup_positions, g_pts_inst)
            ## other losses operate in the object's normalised SDF space
            norm_pcs = pcs * obj_inv_scale
            contact_pts = torch.clone(norm_pcs[:, contact_idx, :])
            contact_pts_sdfs = self.decoder.inference_with_grad(contact_pts, pred_codes)
            non_contact_pts = torch.clone(norm_pcs[:, non_contact_idx, :])
            non_contact_pts_sdfs = self.decoder.inference_with_grad(non_contact_pts, pred_codes)
            # contact loss: penalise contact points floating outside (sdf > 0)
            l_contact = torch.where(contact_pts_sdfs>0, contact_pts_sdfs, torch.zeros_like(contact_pts_sdfs))
            l_contact = torch.mean(l_contact)
            # hand-object collision loss: penalise non-contact points inside (sdf < 0)
            l_collision = -torch.where(non_contact_pts_sdfs<0, non_contact_pts_sdfs, torch.zeros_like(non_contact_pts_sdfs))
            l_collision = torch.mean(l_collision)
            # self-collision loss of robot hand
            l_self_colli = self.fk.compute_loss_self_collision(key_pts)
            l_self_colli = torch.mean(l_self_colli)
            # joint-limit loss
            l_limit = self.fk.compute_loss_limit(joint)

            l_transfer *= 1e6
            # l_transfer_decay = l_transfer * math.exp(-0.02 * i)
            l_transfer_decay = l_transfer
            l_contact *= 1e2
            l_collision *= 1e4
            l_self_colli *= 1e4
            l_limit *= 1e6

            l_total = l_transfer_decay + l_contact + l_collision + l_self_colli + l_limit
            optim.zero_grad()
            # retain_graph: pred_codes' encoder graph is reused across iterations
            l_total.backward(retain_graph=True)
            optim.step()

            if i % 20 == 0 or i == self.optim_step - 1:
                print(f'[{i:5d} / {self.optim_step:5d}]' +
                      f'tf: {l_transfer.item():.3f},' +
                      f'cont: {l_contact.item():.3f},' +
                      f'colli: {l_collision.item():.3f},' +
                      f'self: {l_self_colli.item():.3f},' +
                      f'lmt: {l_limit.item():.3f}')

        # clamp joint angles back into their valid ranges
        joint = self.fk.set_rotations_into_window(joint)

        q = q.detach().cpu().numpy()
        t = t.detach().cpu().numpy()
        joint = joint.detach().cpu().numpy()
        

        # render reconstructed objects and posed hands into the camera image
        img_rendered_obj = copy.deepcopy(img)
        img_rendered_hand = copy.deepcopy(img)
        for i in range(num_objects):
            sRT = copy.deepcopy(result_pose['pred_sRT'][idxs[i]])
            sRT[:3, :3] *= 0.5
            img_rendered_obj = draw_mesh(img_rendered_obj, self.estimator.cam_K, os.path.join(self.assets_dir, f'mesh_for_vis/{i}.ply'), sRT[np.newaxis, ...], vertex_color=np.array([0, 255, 0, 255]))
            mesh = self.fk.get_posed_mesh(np.concatenate([t[i], q[i]], axis=0), joint[i])
            trans = np.eye(4)
            trans[:3, :3] = result_pose['pred_R'][idxs[i]]
            trans[:3, 3]  = result_pose['pred_t'][idxs[i]]
            # mesh.apply_transform(trans)  
            shadow_mesh_path = os.path.join(self.assets_dir, f'mesh_for_vis/{i}_grasp.ply')
            # remove a stale export directly instead of shelling out to `rm`
            if os.path.exists(shadow_mesh_path):
                os.remove(shadow_mesh_path)
            mesh.export(shadow_mesh_path)
            img_rendered_hand = draw_mesh(img_rendered_hand, self.estimator.cam_K, shadow_mesh_path, trans[np.newaxis, ...],vertex_color=np.array([0, 0, 255, 255]))

        
        img_rendered_obj_msg = convert_nparray_to_Image(img_rendered_obj)
        img_rendered_hand_msg = convert_nparray_to_Image(img_rendered_hand)
        
        # RViz markers for the posed hand meshes
        marker_list = []
        
        for i in range(num_objects):
            # mesh_obj_path = 'file://{}'.format(os.path.join(self.assets_dir, f'mesh_for_vis/{i}.ply'))
            t_obj = result_pose['pred_t'][idxs[i]]
            q_obj = R2q(result_pose['pred_R'][idxs[i]])
            # marker_obj = get_marker_msg(mesh_obj_path, i, color=[0,1,0,1], scale=result_pose['pred_s'][i] * 0.5, position=t_obj, orientation=q_obj)
            # marker_list.append(marker_obj)
            shadow_mesh_path = os.path.join(self.assets_dir, f'mesh_for_vis/{i}_grasp.ply')
            mesh_hand_path = 'file://{}'.format(shadow_mesh_path)
            marker_hand = get_marker_msg(mesh_hand_path, 2 * i, color=[0,1,0,1], position=t_obj, orientation=q_obj)
            marker_list.append(marker_hand)
            # import trimesh
            # mesh1 = trimesh.load(os.path.join(self.assets_dir, f'mesh_for_vis/{i}.ply'))
            # mesh1.apply_scale(0.5)
            # mesh1.apply_transform(result_pose['pred_sRT'][idxs[i]])
            # mesh2 = trimesh.load(os.path.join(self.assets_dir, f'mesh_for_vis/{i}_grasp.ply'))
            # pc = trimesh.PointCloud(g_pts_inst[i].detach().cpu().numpy())
            # trimesh.Scene([mesh1, mesh2, pc]).show()

        markerarray_msg = get_markerarray_msg(marker_list)

        # compose grasp pose of the FIRST detected object in camera frame
        results_grasp = dict()
        grasp_OBJ = np.eye(4)
        grasp_OBJ[:3, :3] = q2R(q[0])
        if self.category == 'bottle' and self.grasp_id == 1:
            # NOTE(review): grasp label 1 for bottles appears to be flipped
            # around Y — confirm against the labelling convention
            grasp_OBJ[:3, :3] = rot_Y(np.pi) @ grasp_OBJ[:3, :3]
        grasp_OBJ[:3,  3] = t[0]
        theta = joint[0]
        obj_CAM = np.eye(4)
        obj_CAM[:3, :3] = result_pose['pred_R'][idxs[0]]
        obj_CAM[:3,  3] = result_pose['pred_t'][idxs[0]]
        grasp_CAM = np.matmul(obj_CAM, grasp_OBJ)

        results_grasp['q'] = rosnm.to_multiarray_f32(R2q(grasp_CAM[:3, :3]))
        results_grasp['t'] = rosnm.to_multiarray_f32(grasp_CAM[:3, 3])
        results_grasp['obj_q'] = rosnm.to_multiarray_f32(R2q(obj_CAM[:3, :3]))
        results_grasp['obj_t'] = rosnm.to_multiarray_f32(obj_CAM[:3, 3])
        results_grasp['theta'] = rosnm.to_multiarray_f32(theta)
        return results_grasp, markerarray_msg, img_rendered_obj_msg, img_rendered_hand_msg
    
from math import cos, sin
def rot_Y(theta):
    """Return the 3x3 float32 rotation matrix for angle ``theta`` (radians)
    about the +Y axis."""
    c = np.cos(theta)
    s = np.sin(theta)
    return np.array([[c, 0.0, s],
                     [0.0, 1.0, 0.0],
                     [-s, 0.0, c]], dtype=np.float32)