import trimesh
import math
import scipy
import os
import numpy as np
import json
import pickle
from copy import deepcopy
from perception import DepthImage, CameraIntrinsics
from autolab_core import RigidTransform
import multiprocessing as mp
from grasping.grasp_sampler import GraspSampler
import open3d as o3d


def show(scene):
    """Open the interactive viewer for *scene* (blocks until the window closes).

    Kept as a module-level function so it can serve as a multiprocessing
    target (see the commented-out pool usage in ``__main__``).
    """
    open_viewer = scene.show
    open_viewer()


def generate_label(k, obj_ids, obj_poses, image_data, camera_intr_, camera_pose, show_grasp=True):
    """Rebuild scene *k* (bin + posed object meshes) and visualize grasp labels.

    Back-projects the depth image into a world-frame point cloud, colors each
    point by its precomputed grasp label (green = positive, red = negative,
    blue = collision) and opens an interactive trimesh viewer.

    Relies on module-level globals assigned in ``__main__``: ``bin_mesh``
    (the bin mesh) and ``metadata`` (object-id -> mesh-path mapping).

    Parameters
    ----------
    k : int
        Global image index; selects both the scene state and the label file
        ``./mask_label_all/label_{k:05d}.npy``.
    obj_ids : np.ndarray
        Per-scene object ids, shape (scenes, 15, 1) — see the loader in
        ``__main__``.
    obj_poses : np.ndarray
        Per-scene object poses, shape (scenes, 15, 7): xyz translation plus a
        quaternion (w-first assumed, matching trimesh.transformations — TODO
        confirm against the dataset writer).
    image_data : np.ndarray
        Depth image for this view.
    camera_intr_ : sequence
        Intrinsics as [fx, fy, cx, cy, skew, height, width].
    camera_pose : sequence
        Camera pose as [tx, ty, tz, q0, q1, q2, q3] in the same quaternion
        convention as obj_poses.
    show_grasp : bool, optional
        If True (the previous hard-coded behaviour) read label columns
        (0, 1); otherwise read columns (-6, -5).
    """
    print('k is ', k)

    # Back-project the depth image into a camera-frame point cloud.
    depth_image = DepthImage(image_data, frame='camera')
    camera_intr = CameraIntrinsics(frame='camera', fx=camera_intr_[0], fy=camera_intr_[1], cx=camera_intr_[2],
                                   cy=camera_intr_[3], skew=camera_intr_[4], height=camera_intr_[5],
                                   width=camera_intr_[6])
    point_normal_cloud = depth_image.point_normal_cloud(camera_intr)
    point_data = point_normal_cloud.points.data

    # Camera -> world transform assembled from the stored camera pose.
    T_matrix = trimesh.transformations.translation_matrix(camera_pose[:3])
    rotation_matrix = trimesh.transformations.quaternion_matrix(camera_pose[3:])
    matrix = trimesh.transformations.concatenate_matrices(T_matrix, rotation_matrix)
    matrix = np.asanyarray(matrix, order='C', dtype=np.float64)
    point_data = trimesh.transformations.transform_points(point_data.transpose(1, 0), matrix)
    # Drop points at or below z = 0.01 m in the world frame (bin floor / noise).
    point_data = point_data[point_data[:, 2] > 0.01]

    # Rebuild the object arrangement for this scene. Two images share one
    # state and each state tensor holds 100 states — hence (k % 200) // 2.
    obj_ids_per_scene, obj_poses_per_scene = obj_ids[(k % 200) // 2], obj_poses[(k % 200) // 2]
    scene = trimesh.Scene()
    for obj_id, obj_pose in zip(obj_ids_per_scene, obj_poses_per_scene):
        if obj_id == 4294967295:  # 0xFFFFFFFF marks an empty object slot
            continue
        obj_key = metadata['obj_ids'][str(obj_id[0])]
        print(obj_key)
        # NOTE(review): mesh directory is hard-coded; only the basename of the
        # path stored in metadata is used.
        obj_path = metadata['meshes'][obj_key]
        obj_path = os.path.join('/home/v-wewei/finish_stl/finish_stl_good/', obj_path.split('/')[-1])
        obj = trimesh.load_mesh(obj_path)
        trimesh.smoothing.filter_humphrey(obj)
        T_matrix = trimesh.transformations.translation_matrix(obj_pose[:3])
        rotation_matrix = trimesh.transformations.quaternion_matrix(obj_pose[3:])
        obj.apply_transform(trimesh.transformations.concatenate_matrices(T_matrix, rotation_matrix))
        scene.add_geometry(obj)
    scene.add_geometry(bin_mesh)

    # Per-point grasp labels generated offline. Each row is assumed to hold a
    # (flag, score) pair at the selected columns: flag == 1 -> positive grasp,
    # flag == 0 with score > 0 -> collision, flag == 0 with score == 0 ->
    # negative. Rows matching none of these are ignored, as before.
    mask_data = np.load('./mask_label_all/label_{:05d}.npy'.format(k),
                        allow_pickle=True).item()
    mask_labels = mask_data['point_label']
    flag_col, score_col = (0, 1) if show_grasp else (-6, -5)
    flags = mask_labels[:, flag_col]
    scores = mask_labels[:, score_col]
    negative_idx = np.where((flags == 0) & (scores == 0.0))[0]
    collision_idx = np.where((flags == 0) & (scores > 0.0))[0]
    positive_idx = np.where(flags == 1)[0]

    # Same draw order as before: negative (red), collision (blue),
    # positive (green).
    scene.add_geometry(trimesh.PointCloud(point_data[negative_idx], colors=[255, 0, 0]))
    scene.add_geometry(trimesh.PointCloud(point_data[collision_idx], colors=[0, 0, 255]))
    scene.add_geometry(trimesh.PointCloud(point_data[positive_idx], colors=[0, 255, 0]))
    scene.show()


if __name__ == "__main__":

    obj_dataset_path = '/home/lifuyu/data_disk/v-wewei_data/multi_obj_dataset_good_0326'

    # metadata maps simulator object ids to mesh keys/paths; read as a global
    # by generate_label. Context manager replaces the bare open/close pair so
    # the handle is released even if json.load raises.
    with open(os.path.join(obj_dataset_path, 'metadata.json'), 'r') as f:
        metadata = json.load(fp=f)

    # Bin mesh is shared by every scene; generate_label reads it as a global.
    bin_mesh = trimesh.load_mesh('/home/v-wewei/code/two_stage_pointnet/dataset/data/bin/bin.obj')
    bin_mesh.visual.face_colors = [60, 30, 78, 240]

    state_dir = os.path.join(obj_dataset_path, 'state_tensors/tensors')
    image_dir = os.path.join(obj_dataset_path, 'image_tensors/tensors')

    for i in range(0, 50):
        # One state tensor per i: 15 object slots per scene, poses are
        # xyz + quaternion (7 values).
        obj_ids = np.load(os.path.join(state_dir, 'obj_ids_{:05d}.npz'.format(i)))['arr_0.npy'].reshape(-1, 15, 1)
        obj_poses = np.load(os.path.join(state_dir, 'obj_poses_{:05d}.npz'.format(i)))['arr_0.npy'].reshape(-1, 15, 7)

        for j in range(2):
            tensor_idx = i * 2 + j
            depth_tensor = np.load(
                os.path.join(image_dir, 'depth_im_{:05d}.npz'.format(tensor_idx)))['arr_0.npy']
            camera_intrs_tensor = np.load(
                os.path.join(image_dir, 'camera_intrs_{:05d}.npz'.format(tensor_idx)))['arr_0.npy']
            camera_pose_tensor = np.load(
                os.path.join(image_dir, 'camera_pose_{:05d}.npz'.format(tensor_idx)))['arr_0.npy']

            # Global image index: each image tensor file is assumed to hold
            # 100 views, so tensor j of state file i starts at 200*i + 100*j.
            base_k = 200 * i + j * 100
            for offset, (image_data, camera_intr_, camera_pose) in enumerate(
                    zip(depth_tensor, camera_intrs_tensor, camera_pose_tensor)):
                k = base_k + offset
                # Only visualize even image indices (same skip as before).
                if k % 2 != 0:
                    continue
                generate_label(k, obj_ids, obj_poses, image_data, camera_intr_, camera_pose)