import trimesh
import math
import scipy
import os
import numpy as np
import json
import pickle
from copy import deepcopy
from perception import DepthImage, CameraIntrinsics
from autolab_core import RigidTransform
import multiprocessing as mp
from grasping.grasp_sampler import GraspSampler


def show(scene):
    """Display *scene* in trimesh's interactive viewer.

    Thin wrapper around ``scene.show()`` — presumably kept as a module-level
    function so it can be handed to a multiprocessing pool (see the
    commented-out pool code in ``__main__``); TODO confirm.
    """
    scene.show()


def generate_label(k, obj_ids, obj_poses, image_data, camera_intr_, camera_pose, bin_mesh):
    """Reconstruct and visualize a labelled point cloud for image index ``k``.

    Back-projects the depth image into a camera-frame point/normal cloud,
    transforms it into the world frame via ``camera_pose``, rebuilds the
    scene's object meshes, replaces the normals of points that are NOT on the
    bin surface with the face normal of the closest object triangle, and
    finally shows the cloud plus short normal segments.

    Parameters
    ----------
    k : int
        Global image index; ``(k % 200) // 2`` selects the scene row in
        ``obj_ids`` / ``obj_poses``.
    obj_ids : ndarray, shape (n_scenes, 15, 1)
        Per-scene object ids; 4294967295 (uint32 max) marks an empty slot.
    obj_poses : ndarray, shape (n_scenes, 15, 7)
        Per-scene object poses as translation (3) + quaternion (4).
    image_data : ndarray
        Raw depth image fed to ``DepthImage``.
    camera_intr_ : sequence
        Intrinsics packed as [fx, fy, cx, cy, skew, height, width].
    camera_pose : sequence of 7 floats
        Camera pose as translation (3) + quaternion (4).
    bin_mesh : trimesh.Trimesh
        Bin mesh; points within 0.5 mm of it keep their depth-derived normals.

    NOTE(review): reads the module-level ``metadata`` dict created in the
    ``__main__`` block — importing this function elsewhere, or running it in a
    'spawn'-started worker process, will fail on that name.
    """
    print('k is ', k)

    # Back-project depth to a camera-frame cloud with estimated normals.
    depth_image = DepthImage(image_data, frame='camera')
    camera_intr = CameraIntrinsics(frame='camera', fx=camera_intr_[0], fy=camera_intr_[1],
                                   cx=camera_intr_[2], cy=camera_intr_[3], skew=camera_intr_[4],
                                   height=camera_intr_[5], width=camera_intr_[6])
    point_normal_cloud = depth_image.point_normal_cloud(camera_intr)
    point_data = point_normal_cloud.points.data              # (3, N) camera frame
    point_normal = point_normal_cloud.normals.data.transpose(1, 0)  # -> (N, 3)

    # Camera pose -> 4x4 world-from-camera transform.
    T_matrix = trimesh.transformations.translation_matrix(camera_pose[:3])
    rotation_matrix = trimesh.transformations.quaternion_matrix(camera_pose[3:])
    matrix = trimesh.transformations.concatenate_matrices(T_matrix, rotation_matrix)
    matrix = np.asanyarray(matrix, order='C', dtype=np.float64)

    # Transform points to world frame, keep only points above z = 1 cm, and
    # rotate the corresponding normals with the same rotation.
    point_data = trimesh.transformations.transform_points(point_data.transpose(1, 0), matrix)
    keep = point_data[:, 2] > 0.01
    point_data = point_data[keep]
    point_normal = np.matmul(point_normal[keep], matrix[:3, :3].T)

    # Rebuild this scene's object meshes at their recorded poses.
    obj_ids_per_scene, obj_poses_per_scene = obj_ids[(k % 200) // 2], obj_poses[(k % 200) // 2]
    obj_list = []
    obj_dict = {}
    mesh_dict = {}
    scene = trimesh.Scene()
    for obj_id, obj_pose in zip(obj_ids_per_scene, obj_poses_per_scene):
        # Compare the scalar id, not the 1-element array: truth-testing an
        # ndarray is deprecated and only accidentally worked here.
        if int(obj_id[0]) == 4294967295:  # uint32 max == empty slot
            continue
        obj_key = metadata['obj_ids'][str(obj_id[0])]
        obj = trimesh.load_mesh(metadata['meshes'][obj_key])
        T_matrix = trimesh.transformations.translation_matrix(obj_pose[:3])
        rotation_matrix = trimesh.transformations.quaternion_matrix(obj_pose[3:])
        pose_matrix = trimesh.transformations.concatenate_matrices(T_matrix, rotation_matrix)
        obj.apply_transform(pose_matrix)
        scene.add_geometry(obj)
        obj_dict[obj_key] = pose_matrix
        mesh_dict[obj_key] = obj
        obj_list.append(obj)

    # Points farther than 0.5 mm from the bin surface are assumed to lie on an
    # object: replace their depth-estimated normals with the face normal of the
    # closest triangle of the concatenated object mesh.
    signed_distance = trimesh.proximity.signed_distance(bin_mesh, point_data)
    on_bin = np.abs(signed_distance) < 0.0005
    # Vectorized index extraction (was an O(N) Python list comprehension).
    points_query_obj = np.flatnonzero(~on_bin)
    concatenated_mesh = trimesh.util.concatenate(obj_list)
    _, _, triangle_id = trimesh.proximity.closest_point(concatenated_mesh,
                                                        point_data[points_query_obj])
    point_normal[points_query_obj] = concatenated_mesh.face_normals[triangle_id]

    # Visualize: green point cloud plus 1 cm normal segments for every 5th point.
    scene.add_geometry(trimesh.PointCloud(point_data, colors=[0, 255, 0]))
    ray_origins = point_data[0:200000:5]
    ray_directions = point_normal[0:200000:5]
    vis_path = np.hstack((ray_origins, ray_origins + ray_directions / 100)).reshape(-1, 2, 3)
    print('show normal')
    scene.add_geometry(trimesh.load_path(vis_path))
    scene.show()


if __name__ == "__main__":

    data_root = '/home/lifuyu/data_disk/v-wewei_data/multi_obj_dataset_0319'
    dst_path = os.path.join(data_root, 'image_tensors/tensors/')
    state_path = os.path.join(data_root, 'state_tensors/tensors')

    # Scene metadata: maps raw segmentation ids -> object keys -> mesh paths.
    # Must stay module-level: generate_label reads it as a global.
    with open(os.path.join(data_root, 'metadata.json'), 'r') as f:
        metadata = json.load(f)

    bin_mesh = trimesh.load_mesh('/home/v-wewei/code/two_stage_pointnet/dataset/data/bin/bin.obj')
    bin_mesh.visual.face_colors = [60, 30, 78, 240]

    # NOTE(review): a multiprocessing.Pool fan-out of generate_label existed
    # here but was disabled — generate_label currently blocks on an
    # interactive scene.show().

    for i in range(0, 50):
        # Each state tensor row holds up to 15 object slots per scene.
        obj_ids = np.load(os.path.join(state_path, 'obj_ids_{:05d}.npz'.format(i)))[
            'arr_0.npy'].reshape(-1, 15, 1)
        obj_poses = np.load(os.path.join(state_path, 'obj_poses_{:05d}.npz'.format(i)))[
            'arr_0.npy'].reshape(-1, 15, 7)

        for j in range(2):
            tensor_idx = i * 2 + j
            depth_tensor = np.load(os.path.join(dst_path, 'depth_im_{:05d}.npz'.format(tensor_idx)))['arr_0.npy']
            camera_intrs_tensor = np.load(os.path.join(dst_path, 'camera_intrs_{:05d}.npz'.format(tensor_idx)))['arr_0.npy']
            camera_pose_tensor = np.load(os.path.join(dst_path, 'camera_pose_{:05d}.npz'.format(tensor_idx)))['arr_0.npy']

            k = 200 * i + j * 100
            for image_data, camera_intr_, camera_pose in zip(depth_tensor, camera_intrs_tensor, camera_pose_tensor):
                # Debug filter: only visualize global image index 9.
                # BUGFIX: k is now incremented unconditionally — previously the
                # increment after a processed image was commented out, so once
                # k hit 9 every remaining image in the tensor was also
                # processed with the stale k value.
                if k == 9:
                    generate_label(k, obj_ids, obj_poses, image_data, camera_intr_, camera_pose, bin_mesh)
                k += 1