import trimesh
import math
import scipy
import os
import numpy as np
import json
import pickle
from copy import deepcopy
from perception import DepthImage, CameraIntrinsics
from autolab_core import RigidTransform
import multiprocessing as mp
from grasping.grasp_sampler import GraspSampler


def show(scene):
    """Open a blocking trimesh viewer window for *scene*."""
    scene.show()

def save_dict(dict_, k):
    """Best-effort save of *dict_* to ``./mask_label/image_{k:05d}.npz``.

    Failures (e.g. the ``mask_label`` directory missing) are logged and
    swallowed so a long labelling run is not aborted by a single bad write.

    Parameters
    ----------
    dict_ : dict
        Label dictionary to serialize (stored by np.savez as ``arr_0``).
    k : int
        Image index used to build the zero-padded filename.
    """
    try:
        np.savez('./mask_label/image_{:05d}.npz'.format(k), dict_)
        print('save finished')
    except Exception as exc:
        # Was a bare `except:` that hid the reason and also caught
        # KeyboardInterrupt/SystemExit; narrow it and report the cause.
        print('save failed:', exc)
    

def generate_label(k, base, finger, bin_mesh, obj_ids, obj_poses, image_data, camera_intr_, camera_pose):
    """Compute per-point grasp labels for depth image ``k``.

    Back-projects the depth image into a world-frame point cloud, rebuilds
    the scene (bin + posed object meshes), then for every stored grasp of
    every object checks reachability and collision with the gripper meshes.
    Cloud points near grasp contacts are labelled positive
    (collision-free, label 1) or negative (colliding/bad, label 0) in
    ``point_grasp_dict``, keeping the best-scored grasp per point.

    Reads the module-level globals ``metadata``, ``grasp_path`` and
    ``debug_vis`` defined in the ``__main__`` section.

    Parameters
    ----------
    k : int
        Image index; used for the output filename and to select the scene
        state via ``(k % 200) // 2``.
    base, finger : trimesh.Trimesh
        Gripper base and finger meshes, pre-transformed in ``__main__``.
    bin_mesh : trimesh.Trimesh
        Static bin mesh the objects rest in.
    obj_ids, obj_poses : np.ndarray
        Object ids shaped (scenes, 8, 1) and poses shaped (scenes, 8, 7);
        each pose is translation (3) + quaternion (4) — presumably
        (w, x, y, z) as trimesh expects; TODO confirm against the writer.
    image_data : np.ndarray
        Raw depth image passed to ``DepthImage``.
    camera_intr_ : sequence
        (fx, fy, cx, cy, skew, height, width).
    camera_pose : sequence
        Camera pose as translation (3) + quaternion (4).
    """
    print('k is ', k)
    # Resume support: skip images already labelled by a previous run.
    if os.path.exists('./mask_label/image_{:05d}.npz'.format(k)):
        print('./mask_label/image_{:05d}.npz already exists'.format(k))
        return
    scene = trimesh.Scene()
    scene.add_geometry(bin_mesh)

    # Back-project the depth image to a camera-frame point cloud with normals.
    depth_image = DepthImage(image_data, frame='camera')
    camera_intr = CameraIntrinsics(frame='camera', fx=camera_intr_[0], fy=camera_intr_[1], cx=camera_intr_[2], cy=camera_intr_[3], skew=camera_intr_[4], height=camera_intr_[5], width=camera_intr_[6])
    point_normal_cloud = depth_image.point_normal_cloud(camera_intr)
    point_data = point_normal_cloud.points.data
    # Move the cloud from the camera frame into the world frame.
    T_matrix = trimesh.transformations.translation_matrix(camera_pose[:3])
    rotation_matrix = trimesh.transformations.quaternion_matrix(camera_pose[3:])
    matrix = trimesh.transformations.concatenate_matrices(T_matrix, rotation_matrix)
    matrix = np.asanyarray(matrix, order='C', dtype=np.float64)
    # points.data is (3, N); transform_points expects (N, 3).
    point_data = trimesh.transformations.transform_points(point_data.transpose(1, 0), matrix)
    # cloud index -> (label, score, gripper transform, jaw_width, real_jaw_width)
    point_grasp_dict = {}

    point_tree = scipy.spatial.KDTree(point_data)
    point_cloud = trimesh.PointCloud(point_data, colors=[0, 255, 0])

    # Two images are rendered per scene state, 200 images per state file.
    obj_ids_per_scene, obj_poses_per_scene = obj_ids[(k % 200) // 2], obj_poses[(k % 200) // 2]
    obj_dict = {}   # object key -> world transform
    mesh_dict = {}  # object key -> transformed mesh

    for obj_id, obj_pose in zip(obj_ids_per_scene, obj_poses_per_scene):
        # 4294967295 (uint32 max) marks an empty object slot.
        if obj_id == 4294967295:
            continue
        obj_key = metadata['obj_ids'][str(obj_id[0])]
        obj_path = metadata['meshes'][obj_key]
        obj = trimesh.load_mesh(obj_path)
        # Smooth the mesh so the signed-distance surface tests below are less noisy.
        trimesh.smoothing.filter_humphrey(obj)
        T_matrix = trimesh.transformations.translation_matrix(obj_pose[:3])
        rotation_matrix = trimesh.transformations.quaternion_matrix(obj_pose[3:])
        matrix = trimesh.transformations.concatenate_matrices(T_matrix, rotation_matrix)
        obj.apply_transform(matrix)
        scene.add_geometry(obj)
        obj_dict[obj_key] = matrix
        mesh_dict[obj_key] = obj

    collision_manager, _ = trimesh.collision.scene_to_collision(scene)
    for key, value in obj_dict.items():
        key_ori = key
        # Grasp data on disk is stored under the short name after the '~'.
        key = key.split('~')[1]
        # Load the pre-computed bad points and mark nearby cloud points as
        # negative (label 0, zero score).
        badpoints = np.load(grasp_path + '{}/badpoints.npy'.format(key))
        badpoints = trimesh.transformations.transform_points(badpoints, value)
        if badpoints.shape[0] > 0:
            points_query = point_tree.query_ball_point(badpoints, r=0.005, p=2)
            points_query = [item for sublist in points_query for item in sublist]
            points_query = list(set(points_query))
            # Keep only points lying on this object's surface so the label
            # does not leak onto the background or other objects.
            if len(points_query) > 0:
                signed_distance = trimesh.proximity.signed_distance(mesh_dict[key_ori], point_data[points_query])
                mask = abs(signed_distance) < 0.0004
                points_query = [points_query[i] for i in range(len(mask)) if mask[i]]
                for index in points_query:
                    # NOTE(review): the second clause is always False (stored
                    # labels are 0 or 1, never negative), so this only writes
                    # previously-unseen points — confirm intent.
                    if index not in point_grasp_dict.keys() or 0 > point_grasp_dict[index][0]:
                        point_grasp_dict[index] = (0, 0.0, np.identity(4), 0.0, 0.0)
        files = os.listdir(grasp_path + '{}'.format(key))
        for file_name in files:
            # Only pickles whose basename starts with 'final' hold grasps.
            if os.path.splitext(file_name)[0].startswith('final'):
                with open(grasp_path + '{}/{}'.format(key, file_name), 'rb') as f:
                    grasps = pickle.load(f)
                    for grasp in grasps:
                        is_collision_flag = False
                        # One candidate only: arange over an interval of length
                        # 1 with step 180 yields just the stored approach angle.
                        angle_candidates = np.arange(grasp.approach_angle, grasp.approach_angle+1, 180)

                        for grasp_angle in angle_candidates:
                            grasp.approach_angle = grasp_angle
                            pose = RigidTransform(grasp.rotated_full_axis, grasp.center, from_frame='gripper', to_frame='obj')
                            transform = trimesh.transformations.concatenate_matrices(value, pose.matrix)
                            # Approach axis in the world frame (first rotation
                            # column) — presumably the gripper x axis; confirm.
                            z_vec = transform[:3, 0]

                            # Reject grasps whose approach is not tilted at
                            # least 15 degrees downward (top-down bin picking).
                            if z_vec[2] > -math.sin(math.pi/12):
                                is_collision_flag = True
                            # Place the base and both fingers at the candidate pose.
                            base_copy = deepcopy(base)
                            base_copy.apply_transform(transform)
                            finger_left_copy = deepcopy(finger)
                            T_finger = trimesh.transformations.translation_matrix([-(grasp.real_jaw_width_/2+0.005), 0, 0])
                            R_finger_left = trimesh.transformations.euler_matrix(np.pi/2, np.pi/2, 0, 'rxyz')
                            Trans_finger_left = trimesh.transformations.concatenate_matrices(transform, R_finger_left, T_finger)

                            finger_left_copy.apply_transform(Trans_finger_left)
                            finger_right_copy = deepcopy(finger)
                            R_finger_right = trimesh.transformations.euler_matrix(np.pi/2, np.pi/2, np.pi, 'rxyz')
                            Trans_finger_right = trimesh.transformations.concatenate_matrices(transform, R_finger_right, T_finger)
                            finger_right_copy.apply_transform(Trans_finger_right)
                            if is_collision_flag:
                                is_collision = True
                            else:
                                is_collision = collision_manager.in_collision_single(finger_left_copy, return_names=False)\
                                or collision_manager.in_collision_single(finger_right_copy, return_names=False)\
                                or collision_manager.in_collision_single(base_copy, return_names=False)

                            if not is_collision:
                                # Positive label: cloud points near either grasp
                                # contact get label 1 with the grasp's score.
                                contact_points = grasp.contactpoints
                                contact_points = trimesh.transformations.transform_points(contact_points, value)
                                # assumes exactly two contact points per grasp — TODO confirm
                                points_query = point_tree.query_ball_point(contact_points, r=0.006, p=2)
                                # Require at least 3 neighbours per contact so a
                                # barely-visible contact does not produce labels.
                                if len(points_query[0]) > 2:
                                    if len(points_query[1]) > 2:
                                        points_query = points_query[0] + points_query[1]
                                    else:
                                        points_query = points_query[0]
                                elif len(points_query[1]) > 2:
                                    points_query = points_query[1]
                                else:
                                    points_query = []

                                if len(points_query) > 0:
                                    # Keep only points on this object's surface.
                                    signed_distance = trimesh.proximity.signed_distance(mesh_dict[key_ori], point_data[points_query])
                                    mask = abs(signed_distance) < 0.0005
                                    points_query = [points_query[i] for i in range(len(mask)) if mask[i]]
                                    for index in points_query:
                                        # Prefer higher-scored grasps and always
                                        # upgrade a negative (label 0) point.
                                        if index not in point_grasp_dict.keys() or grasp.grasp_score > point_grasp_dict[index][1] or point_grasp_dict[index][0] == 0:
                                            point_grasp_dict[index] = (1, grasp.grasp_score, transform, grasp.jaw_width_, grasp.real_jaw_width_)
                            else:
                                # Negative label: contacts of colliding or
                                # unreachable grasps, slightly tighter radius.
                                contact_points = grasp.contactpoints
                                contact_points = trimesh.transformations.transform_points(contact_points, value)
                                points_query = point_tree.query_ball_point(contact_points, r=0.005, p=2)
                                if len(points_query[0]) > 3:
                                    if len(points_query[1]) > 3:
                                        points_query = points_query[0] + points_query[1]
                                    else:
                                        points_query = points_query[0]
                                elif len(points_query[1]) > 3:
                                    points_query = points_query[1]
                                else:
                                    points_query = []
                                if len(points_query) > 0:
                                    # Keep only points on this object's surface.
                                    signed_distance = trimesh.proximity.signed_distance(mesh_dict[key_ori], point_data[points_query])
                                    mask = abs(signed_distance) < 0.0004
                                    points_query = [points_query[i] for i in range(len(mask)) if mask[i]]
                                    for index in points_query:
                                        # Only ever overwrite another negative;
                                        # never downgrade a positive label.
                                        if index not in point_grasp_dict.keys() or (grasp.grasp_score > point_grasp_dict[index][1] and point_grasp_dict[index][0] == 0):
                                            point_grasp_dict[index] = (0, grasp.grasp_score, transform, grasp.jaw_width_, grasp.real_jaw_width_)

                            if debug_vis and not is_collision:
                                # Visualise the accepted grasp: approach ray,
                                # gripper meshes, cloud and labelled contacts.
                                scene_ = deepcopy(scene)
                                ray_origins = np.array([0., 0., 0.])
                                ray_directions = z_vec
                                ray_visualize = trimesh.load_path(np.hstack((ray_origins, ray_origins+ray_directions / 2)).reshape(-1, 2, 3))
                                scene_.add_geometry(ray_visualize, 'ray')
                                scene_.add_geometry(base_copy, node_name='base')
                                scene_.add_geometry(finger_left_copy, node_name='finger_left_copy')
                                scene_.add_geometry(finger_right_copy, node_name='finger_right_copy')
                                scene_.add_geometry(point_cloud, node_name='point_cloud')
                                # (is_collision is known False in this branch,
                                # so the old redundant re-checks were dropped.)
                                if len(points_query) > 0:
                                    contact_points_cloud = trimesh.PointCloud(point_data[points_query], colors=[255, 0, 0])
                                    scene_.add_geometry(contact_points_cloud, node_name='contact_points_cloud')
                                show(scene_)
                                scene_.delete_geometry('point_cloud')
                                scene_.delete_geometry('ray')
                                scene_.delete_geometry('base')
                                scene_.delete_geometry('finger_left_copy')
                                # Fixed typo: was 'finger_rifht_copy', which
                                # left the right finger behind in the scene.
                                scene_.delete_geometry('finger_right_copy')
                                scene_.delete_geometry('contact_points_cloud')
                                del scene_

                            del finger_left_copy
                            del finger_right_copy
                            del base_copy
                            # First collision-free angle wins; stop trying more.
                            if not is_collision:
                                break

    del collision_manager
    del scene
    print('test a single scene')
    # NOTE(review): the computed labels are currently discarded — re-enable
    # the call below to actually write them out.
    #save_dict(point_grasp_dict, k)

if __name__ == "__main__":

    # Set True to open a trimesh viewer for every collision-free grasp.
    debug_vis = False
    dst_path = '/home/v-wewei/code/two_stage_pointnet/dataset/multi_obj_dataset/image_tensors/tensors/'

    # Context manager guarantees the handle is closed even if parsing fails
    # (was a bare open()/close() pair).
    with open('dataset/multi_obj_dataset/metadata.json', 'r') as f:
        metadata = json.load(fp=f)

    # Gripper collision meshes; the full 'gripper.obj' mesh was loaded and
    # transformed here before but never used, so it was removed.
    base_path = '/home/v-wewei/code/two_stage_pointnet/data/grippers/yumi/coarse/box_base.stl'
    finger_path = '/home/v-wewei/code/two_stage_pointnet/data/grippers/yumi/coarse/box_finger.stl'
    base = trimesh.load_mesh(base_path)
    finger = trimesh.load_mesh(finger_path)
    # Move base and finger into the tool frame expected by generate_label.
    T_base = trimesh.transformations.translation_matrix([0, 0, -0.13])
    R_base = trimesh.transformations.euler_matrix(np.pi/2, np.pi/2, 0, 'rxyz')
    T_finger = trimesh.transformations.translation_matrix([0, -0.0065, -0.048])

    finger.apply_transform(T_finger)
    base.apply_transform(T_base)
    base.apply_transform(R_base)

    bin_mesh = trimesh.load_mesh('/home/v-wewei/code/two_stage_pointnet/dataset/data/bin/bin.obj')
    bin_mesh.visual.face_colors = [60, 30, 78, 240]
    grasp_path = '/home/v-wewei/code/two_stage_pointnet/generated_grasp_{}/'.format(2000)
    # NOTE(review): an mp.Pool was created here but every pool.apply* call was
    # commented out, leaving idle worker processes that were never closed or
    # joined; recreate the pool (with close()/join()) if parallel labelling is
    # re-enabled.

    for i in range(0, 1):
        obj_id_file = np.load('dataset/multi_obj_dataset/state_tensors/tensors/obj_ids_{:05d}.npz'.format(i))
        obj_ids = obj_id_file['arr_0.npy'].reshape(-1, 8, 1)
        obj_pose_file = np.load('dataset/multi_obj_dataset/state_tensors/tensors/obj_poses_{:05d}.npz'.format(i))
        obj_poses = obj_pose_file['arr_0.npy'].reshape(-1, 8, 7)

        for j in range(2):
            depth_tensor = np.load(os.path.join(dst_path, 'depth_im_{:05d}.npz'.format(i*2+j)))['arr_0.npy']
            camera_intrs_tensor = np.load(os.path.join(dst_path, 'camera_intrs_{:05d}.npz'.format(i*2+j)))['arr_0.npy']
            camera_pose_tensor = np.load(os.path.join(dst_path, 'camera_pose_{:05d}.npz'.format(i*2+j)))['arr_0.npy']
            # Global image index: 100 images per (i, j) tensor, 200 per state file.
            k = 200*i + j*100
            for image_data, camera_intr_, camera_pose in zip(depth_tensor, camera_intrs_tensor, camera_pose_tensor):
                # NOTE(review): only image 46 is processed — presumably a
                # leftover debugging filter; drop the condition to label all
                # images.
                if k == 46:
                    generate_label(k, base, finger, bin_mesh, obj_ids, obj_poses, image_data, camera_intr_, camera_pose)
                k += 1