import trimesh
import os
import numpy as np
import json
import pickle
from copy import deepcopy
from perception import DepthImage, CameraIntrinsics
from autolab_core import RigidTransform


def show(scene):
    """Display *scene* in trimesh's interactive viewer (blocks until closed)."""
    scene.show()

# When True, every collision-free grasp is rendered in the trimesh viewer.
debug = True
# Directory holding the rendered image tensors (depth / intrinsics / poses).
dst_path = '/home/v-wewei/code/two_stage_pointnet/dataset/multi_obj_dataset/image_tensors/tensors/'

# FIX: the metadata file was opened and never closed; use a context manager
# so the handle is released deterministically. `metadata` maps object ids to
# object keys ('obj_ids') and object keys to mesh paths ('meshes').
with open('dataset/multi_obj_dataset/metadata.json', 'r') as f:
    metadata = json.load(fp=f)

# Load the gripper mesh once and pre-align it so each grasp test only needs
# a single per-grasp transform. NOTE(review): the -0.061 z-offset and the
# rxyz(pi/2, pi/2, 0) rotation presumably move the mesh into the grasp
# frame expected by RigidTransform below -- confirm against the gripper spec.
gripper_path = '/home/v-wewei/code/two_stage_pointnet/data/grippers/yumi/gripper.obj'
gripper = trimesh.load_mesh(gripper_path)
T = trimesh.transformations.translation_matrix([0, 0, -0.061])
R = trimesh.transformations.euler_matrix(np.pi/2, np.pi/2, 0, 'rxyz')
gripper.apply_transform(T)
gripper.apply_transform(R)

# Static bin mesh shared by every scene; RGBA face color for visualization.
bin_mesh = trimesh.load_mesh('/home/v-wewei/code/two_stage_pointnet/dataset/data/bin/bin.obj')
bin_mesh.visual.face_colors = [60, 30, 78, 240]
for i in range(20):
    # Scene-state tensors are grouped five image files per state file (i//5).
    # Each state holds up to 10 object slots: ids (N,10,1) and poses (N,10,7)
    # as [tx, ty, tz, qw, qx, qy, qz].
    # NOTE(review): image files are indexed i*20+j while j only spans
    # range(5) -- this looks like it skips most files; confirm against the
    # dataset generator before changing.
    obj_id_file = np.load('dataset/multi_obj_dataset/state_tensors/tensors/obj_ids_{:05d}.npz'.format(i//5))
    obj_ids = obj_id_file['arr_0.npy'].reshape(-1, 10, 1)
    obj_pose_file = np.load('dataset/multi_obj_dataset/state_tensors/tensors/obj_poses_{:05d}.npz'.format(i//5))
    obj_poses = obj_pose_file['arr_0.npy'].reshape(-1, 10, 7)

    # k counts images across the j-loop; k//5 selects the scene state
    # (five consecutive images share one state).
    k = 0
    for j in range(5):
        depth_tensor = np.load(os.path.join(dst_path, 'depth_im_{:05d}.npz'.format(i*20+j)))['arr_0.npy']
        camera_intrs_tensor = np.load(os.path.join(dst_path, 'camera_intrs_{:05d}.npz'.format(i*20+j)))['arr_0.npy']
        camera_pose_tensor = np.load(os.path.join(dst_path, 'camera_pose_{:05d}.npz'.format(i*20+j)))['arr_0.npy']
        for image_data, camera_intr_, camera_pose in zip(depth_tensor, camera_intrs_tensor, camera_pose_tensor):
            scene = trimesh.Scene()
            scene.add_geometry(bin_mesh)

            print(camera_pose)
            # Back-project the depth image to a point cloud in the camera
            # frame, then transform it into the world frame with the camera
            # pose ([tx, ty, tz] + quaternion).
            depth_image = DepthImage(image_data, frame='camera')
            camera_intr = CameraIntrinsics(frame='camera', fx=camera_intr_[0], fy=camera_intr_[1], cx=camera_intr_[2], cy=camera_intr_[3], skew=camera_intr_[4], height=camera_intr_[5], width=camera_intr_[6])
            point_normal_cloud = depth_image.point_normal_cloud(camera_intr)
            point_data = point_normal_cloud.points.data
            camera_matrix = trimesh.transformations.concatenate_matrices(
                trimesh.transformations.translation_matrix(camera_pose[:3]),
                trimesh.transformations.quaternion_matrix(camera_pose[3:]))
            camera_matrix = np.asanyarray(camera_matrix, order='C', dtype=np.float64)
            point_data = trimesh.transformations.transform_points(point_data.transpose(1, 0), camera_matrix)
            point_cloud = trimesh.PointCloud(point_data, colors=[0, 255, 0])

            # Place every object of the current scene state into the scene.
            obj_ids_per_scene, obj_poses_per_scene = obj_ids[k//5], obj_poses[k//5]
            obj_dict = {}
            for obj_id, obj_pose in zip(obj_ids_per_scene, obj_poses_per_scene):
                if obj_id == 4294967295:  # uint32 max: sentinel for an empty slot
                    continue
                obj_key = metadata['obj_ids'][str(obj_id[0])]
                obj_path = metadata['meshes'][obj_key]
                obj = trimesh.load_mesh(obj_path)
                matrix = trimesh.transformations.concatenate_matrices(
                    trimesh.transformations.translation_matrix(obj_pose[:3]),
                    trimesh.transformations.quaternion_matrix(obj_pose[3:]))
                obj.apply_transform(matrix)
                scene.add_geometry(obj)
                obj_dict[obj_key] = matrix

            # FIX: build the collision manager ONCE per scene. The scene does
            # not change below (the gripper copy is tested against it, never
            # added to it), so rebuilding it for every grasp angle was pure
            # redundant work dominating the inner loop.
            collision_manager, _ = trimesh.collision.scene_to_collision(scene)

            # For each object, load its pre-generated grasps and search for
            # a collision-free approach angle.
            for key, value in obj_dict.items():
                key = key.split('~')[1]
                grasp_dir = '/home/v-wewei/code/two_stage_pointnet/generated_grasp/{}'.format(key)
                for file_name in os.listdir(grasp_dir):
                    if os.path.splitext(file_name)[1] != '.pickle':
                        continue
                    # FIX: close the pickle file (it was leaked via
                    # pickle.load(open(...)) before).
                    with open(os.path.join(grasp_dir, file_name), 'rb') as pickle_file:
                        grasps = pickle.load(pickle_file)
                    for grasp in grasps:
                        # Try approach angles -90..90 degrees in random order;
                        # stop at the first collision-free one.
                        angle_candidates = np.arange(-90, 120, 30)
                        np.random.shuffle(angle_candidates)
                        for grasp_angle in angle_candidates:
                            grasp.approach_angle = grasp_angle
                            pose = RigidTransform(grasp.rotated_full_axis, grasp.center, from_frame='gripper', to_frame='obj')
                            gripper_copy = deepcopy(gripper)
                            # World-frame gripper pose: object pose composed
                            # with the grasp's gripper->object transform.
                            transform = trimesh.transformations.concatenate_matrices(value, pose.matrix)
                            gripper_copy.apply_transform(transform)
                            is_collision = collision_manager.in_collision_single(gripper_copy, return_names=False)
                            if debug and not is_collision:
                                # FIX: only deepcopy the scene on the debug
                                # display path -- it was copied per grasp even
                                # when never shown.
                                scene_ = deepcopy(scene)
                                scene_.add_geometry(gripper_copy, node_name='gripper')
                                scene_.add_geometry(point_cloud, node_name='point_cloud')
                                show(scene_)
                                del scene_
                            del gripper_copy
                            print('test a single grasp angle')
                            if not is_collision:
                                break
            del scene
            k += 1
        
    



#print(camera_intrs_tensor)
#exit()



