import numpy as np
import trimesh
import os
from perception import DepthImage, CameraIntrinsics
import autolab_core

# Hard-coded location of the rendered multi-object image tensor dataset.
dst_path = '/home/v-wewei/code/two_stage_pointnet/dataset/multi_obj_dataset/image_tensors/tensors/'


def _load_tensor(basename):
    """Load the single array stored in one of the dataset's .npz tensor files."""
    return np.load(os.path.join(dst_path, basename))['arr_0.npy']


depth_tensor = _load_tensor('depth_im_00000.npz')
camera_intrs_tensor = _load_tensor('camera_intrs_00000.npz')
camera_pose_tensor = _load_tensor('camera_pose_00000.npz')

# For each (depth image, intrinsics, pose) triple: back-project the depth map
# into a camera-frame point cloud, move it to world frame, and visualize it.
for depth_arr, intr_row, pose in zip(depth_tensor, camera_intrs_tensor, camera_pose_tensor):
    print(pose)

    depth_im = DepthImage(depth_arr, frame='camera')

    # Intrinsics row layout: [fx, fy, cx, cy, skew, height, width].
    fx, fy, cx, cy, skew, height, width = intr_row[:7]
    intrinsics = CameraIntrinsics(frame='camera', fx=fx, fy=fy, cx=cx, cy=cy,
                                  skew=skew, height=height, width=width)

    # Back-project pixels + depths into a 3D point cloud (camera frame).
    cloud = depth_im.point_normal_cloud(intrinsics)
    pts = cloud.points.data

    # Pose row layout: [tx, ty, tz, q0, q1, q2, q3]; compose translation with
    # rotation into one 4x4 homogeneous transform.
    # NOTE(review): trimesh expects quaternions as (w, x, y, z) — confirm the
    # dataset stores them in that order.
    translate = trimesh.transformations.translation_matrix(pose[:3])
    rotate = trimesh.transformations.quaternion_matrix(pose[3:])
    world_tf = np.asanyarray(
        trimesh.transformations.concatenate_matrices(translate, rotate),
        order='C', dtype=np.float64)

    # points.data is (3, N); transform_points wants (N, 3) rows.
    world_pts = trimesh.transformations.transform_points(pts.T, world_tf)

    # Show the transformed cloud in green (blocks until the viewer is closed).
    trimesh.PointCloud(world_pts, colors=[0, 255, 0]).show()

#print(depth_tensor.shape)





