import trimesh
import math
import scipy
import os
import numpy as np
import json
import pickle
from copy import deepcopy
from perception import DepthImage, CameraIntrinsics
from autolab_core import RigidTransform
import multiprocessing as mp
from grasping.grasp_sampler import GraspSampler


def show(scene):
    """Open the interactive viewer for *scene* (delegates to ``scene.show()``).

    Thin wrapper so the viewer call can be handed around as a plain function
    (e.g. to a worker pool).
    """
    viewer_call = scene.show
    viewer_call()


def generate_label(k, obj_ids, obj_poses, image_data, camera_intr_, camera_pose):
    """Back-project one depth image to a world-frame point cloud, replace the
    normals of object points with the underlying mesh face normals, and show
    the result for visual inspection.

    Reads the module-level globals ``metadata`` (object-id / mesh-path maps)
    and ``bin_mesh`` (bin geometry) that are set up in ``__main__``.

    Args:
        k: Global image index (printed for progress tracking and used to
            select the matching scene state).
        obj_ids: Per-scene object-id array, indexed by ``(k % 200) // 2``;
            presumably shaped (-1, 15, 1) as reshaped by the caller — confirm.
        obj_poses: Per-scene object poses (translation + quaternion), indexed
            like ``obj_ids``; presumably (-1, 15, 7) — confirm.
        image_data: Raw depth image array for this view.
        camera_intr_: Flat intrinsics vector (fx, fy, cx, cy, skew, h, w).
        camera_pose: 7-vector camera pose: translation (3) + quaternion (4).
    """
    print('k is ', k)

    # Back-project the depth image into a camera-frame point cloud with
    # per-point normals estimated by the perception library.
    depth_image =  DepthImage(image_data, frame='camera')
    camera_intr = CameraIntrinsics(frame='camera', fx=camera_intr_[0], fy=camera_intr_[1], cx=camera_intr_[2], cy=camera_intr_[3], skew=camera_intr_[4], height=camera_intr_[5], width=camera_intr_[6])
    point_normal_cloud = depth_image.point_normal_cloud(camera_intr)
    point_data = point_normal_cloud.points.data

    # Normals come back as (3, N); transpose to (N, 3) rows.
    point_normal = point_normal_cloud.normals.data.transpose(1, 0)
    # Build the 4x4 camera-to-world transform from translation + quaternion.
    T_matrix = trimesh.transformations.translation_matrix(camera_pose[:3])
    rotation_matrix = trimesh.transformations.quaternion_matrix(camera_pose[3:])
    matrix = trimesh.transformations.concatenate_matrices(T_matrix,rotation_matrix)
    matrix = np.asanyarray(matrix, order='C', dtype=np.float64)
    # Transform points to the world frame, then drop points at or below
    # z = 0.01 m (presumably the bin floor — confirm).
    point_data = trimesh.transformations.transform_points(point_data.transpose(1,0), matrix)
    index = point_data[:, 2] > 0.01
    point_data = point_data[index]
    R_point_cloud = matrix[:3, :3]
    point_normal = point_normal[index]
    # Rotate the surviving normals into the world frame (rotation only).
    point_normal = np.matmul(point_normal, R_point_cloud.T)

    # Select the scene state that produced this image.
    # NOTE(review): the (k % 200) // 2 indexing assumes 200 images per state
    # file and 2 images per scene — confirm against the caller's layout.
    obj_ids_per_scene, obj_poses_per_scene = obj_ids[(k%200)//2], obj_poses[(k%200)//2]
    obj_dict = {}
    mesh_dict = {}
    scene = trimesh.Scene()
    obj_list = []
    for obj_id, obj_pose in zip(obj_ids_per_scene, obj_poses_per_scene):
        # 4294967295 (uint32 max) marks an empty object slot in the id tensor.
        if obj_id == 4294967295:
            continue
        obj_key = metadata['obj_ids'][str(obj_id[0])]
        obj_path = metadata['meshes'][obj_key]
        # Remap the recorded mesh path onto the local mesh directory.
        obj_path = os.path.join('/home/v-wewei/finish_stl/finish_stl_good/', obj_path.split('/')[-1])
        obj = trimesh.load_mesh(obj_path)
        obj_list.append(obj)
        # Pose each mesh in the world frame (translation + quaternion).
        T_matrix = trimesh.transformations.translation_matrix(obj_pose[:3])
        rotation_matrix = trimesh.transformations.quaternion_matrix(obj_pose[3:])
        matrix = trimesh.transformations.concatenate_matrices(T_matrix, rotation_matrix)
        obj.apply_transform(matrix)
        scene.add_geometry(obj)
        obj_dict[obj_key] = matrix
        mesh_dict[obj_key] = obj

    # Merge all object meshes so a single closest-point query covers them.
    concatenate_meshes = trimesh.util.concatenate(obj_list, b=None)
    # Points within 0.5 mm of the bin surface are treated as bin points;
    # everything else is assumed to lie on an object.
    signed_distance = trimesh.proximity.signed_distance(bin_mesh, point_data)
    mask = abs(signed_distance) < 0.0005
    mask_obj = ~mask
    points_query_obj = [i for i in range(len(mask_obj)) if mask_obj[i]]
    # Replace the depth-estimated normals of object points with the face
    # normal of the nearest mesh triangle (cleaner than sensor-derived ones).
    _, _, triangle_id = trimesh.proximity.closest_point(concatenate_meshes, point_data[points_query_obj])
    concatenate_meshes_face_normals = concatenate_meshes.face_normals
    point_normal[points_query_obj] = concatenate_meshes_face_normals[triangle_id]

    # Visualize: green point cloud plus short line segments (1 cm scaled)
    # along every 10th normal.
    all_point = trimesh.PointCloud(point_data, colors=[0, 255, 0])
    scene.add_geometry(all_point)
    ray_origins = point_data[0:200000:10]
    ray_directions = point_normal[0:200000:10]
    vis_path = np.hstack((ray_origins, ray_origins+ray_directions/100)).reshape(-1, 2, 3)
    ray_visualize = trimesh.load_path(vis_path)
    print('show normal')
    scene.add_geometry(ray_visualize)
    scene.show()
   

if __name__ == "__main__":

    # Root of the multi-object dataset (metadata.json + state/image tensors).
    obj_dataset_path = '/home/lifuyu/data_disk/v-wewei_data/multi_obj_dataset_good_0326/'

    # Dataset metadata: object-id -> key and key -> mesh-path maps read by
    # generate_label() as a module global. `with` guarantees the file handle
    # is closed even if json.load raises (the manual open/close leaked here).
    with open(os.path.join(obj_dataset_path, 'metadata.json'), 'r') as f:
        metadata = json.load(fp=f)

    # Load the gripper mesh and move it into its canonical frame.
    # NOTE(review): `gripper` is unused by the active code path below — it was
    # an argument of the commented-out pool.apply call. Kept for parity.
    gripper_path = '/home/v-wewei/code/two_stage_pointnet/data/grippers/yumi/gripper.obj'
    gripper = trimesh.load_mesh(gripper_path)
    T = trimesh.transformations.translation_matrix([0, 0, -0.061])
    R = trimesh.transformations.euler_matrix(np.pi/2, np.pi/2, 0, 'rxyz')
    gripper.apply_transform(T)
    gripper.apply_transform(R)

    # Bin mesh consumed by generate_label() (module global) to separate
    # bin-surface points from object points.
    bin_mesh = trimesh.load_mesh('/home/v-wewei/code/two_stage_pointnet/dataset/data/bin/bin.obj')
    bin_mesh.visual.face_colors = [60, 30, 78, 240]

    # NOTE(review): with the current synchronous generate_label() call this
    # pool spawns cpu_count() idle workers; it is kept only so the
    # pool.apply_async line below can be re-enabled without other changes.
    pool = mp.Pool(processes=mp.cpu_count())

    for i in range(0, 50):
        # One state file holds object ids/poses for all scenes of chunk i.
        obj_id_file = np.load(os.path.join(obj_dataset_path, 'state_tensors/tensors/obj_ids_{:05d}.npz'.format(i)))
        obj_ids = obj_id_file['arr_0.npy'].reshape(-1, 15, 1)
        obj_pose_file = np.load(os.path.join(obj_dataset_path, 'state_tensors/tensors/obj_poses_{:05d}.npz'.format(i)))
        obj_poses = obj_pose_file['arr_0.npy'].reshape(-1, 15, 7)

        # Two image-tensor files correspond to each state file.
        for j in range(2):
            depth_tensor = np.load(os.path.join(obj_dataset_path, 'image_tensors/tensors/depth_im_{:05d}.npz'.format(i*2+j)))['arr_0.npy']
            camera_intrs_tensor = np.load(os.path.join(obj_dataset_path, 'image_tensors/tensors/camera_intrs_{:05d}.npz'.format(i*2+j)))['arr_0.npy']
            camera_pose_tensor = np.load(os.path.join(obj_dataset_path, 'image_tensors/tensors/camera_pose_{:05d}.npz'.format(i*2+j)))['arr_0.npy']
            # Global image index: 100 images per tensor file, 200 per chunk.
            k = 200*i + j*100
            for image_data, camera_intr_, camera_pose in zip(depth_tensor, camera_intrs_tensor, camera_pose_tensor):
                # Debug filter: only visualize image k == 50.
                # TODO(review): remove or parameterize before batch runs.
                if k != 50:
                    k += 1
                    continue
                # Parallel alternative (re-enable together with the pool):
                # pool.apply_async(generate_label, args=(k, obj_ids, obj_poses, image_data, camera_intr_, camera_pose))
                generate_label(k, obj_ids, obj_poses, image_data, camera_intr_, camera_pose)
                k += 1

    pool.close()
    pool.join()

