import json
import os
import pickle

import numpy as np
import torch
import torch.utils.data as data
import time
from dataset import data_utils as d_utils
# from . import data_utils as d_utils
#import data_utils as d_utils
import torchvision.transforms as transforms
import random
import math
from copy import deepcopy
import trimesh
# # warnings.warn('WARNING', DeprecationWarning)
# import perception
from perception import DepthImage, CameraIntrinsics

BASE_DIR = os.path.dirname(os.path.abspath(__file__))





def generate_label( image_data, camera_intr_, camera_pose):
    """Deproject a depth image into a world-frame point cloud with synthetic sensor noise.

    Args:
        image_data: depth image array (per-pixel depth, presumably meters -- confirm).
        camera_intr_: sequence [fx, fy, cx, cy, skew, height, width].
        camera_pose: sequence of translation (first 3) + quaternion (rest).

    Returns:
        (N, 3) float16 array of transformed points, keeping only z > 0.01.
    """
    # Perturb the raw depth with zero-mean Gaussian noise clipped to +/-0.00075.
    perturbation = np.clip(np.random.normal(0, 0.00075, image_data.shape),
                           -0.00075, 0.00075)
    noisy_depth = DepthImage(image_data + perturbation, frame='camera')

    intrinsics = CameraIntrinsics(frame='camera',
                                  fx=camera_intr_[0], fy=camera_intr_[1],
                                  cx=camera_intr_[2], cy=camera_intr_[3],
                                  skew=camera_intr_[4], height=camera_intr_[5],
                                  width=camera_intr_[6])
    # Back-project every pixel into the camera frame; data comes out (3, N).
    cam_points = intrinsics.deproject_to_image(noisy_depth).to_point_cloud().data

    # Compose the camera pose (translation * rotation) into one 4x4 transform.
    pose_matrix = trimesh.transformations.concatenate_matrices(
        trimesh.transformations.translation_matrix(camera_pose[:3]),
        trimesh.transformations.quaternion_matrix(camera_pose[3:]))
    pose_matrix = np.asanyarray(pose_matrix, order='C', dtype=np.float64)

    world_points = trimesh.transformations.transform_points(
        cam_points.transpose(1, 0), pose_matrix)
    # Discard points at/below the support plane and compact to float16.
    return world_points[world_points[:, 2] > 0.01].astype(np.float16)


class Grasp_SuctionBindataset(data.Dataset):
    """Point-cloud dataset yielding per-point grasp and suction labels.

    Each sample is a mean-centered cloud of ``num_points`` points plus a tuple
    of per-point targets (grasp class/score/pose, pose matrix, distance,
    normals, suction class/score, objectness) and the underlying dataset index.

    Per-point label layout (from ``label_{idx}.npy`` -> ``point_label``):
    columns [0:24] are grasp labels, columns [24:] are suction labels.
    """

    def __init__(self, dataset_dir=None, num_points=20000, transforms=None, mode='train', use_normal=False,
                 platform='remote', non_uniform_sampling=True, aug_scene=True):
        """
        Args:
            dataset_dir: root directory holding ``label_*.npy`` (train/validate)
                and ``pointcloud_data/xyz_*.npy`` (test) files.
            num_points: number of points sampled per scene.
            transforms: optional callable applied to the final point array.
            mode: one of 'train', 'validate', 'test'.
            use_normal: unsupported; kept only for interface compatibility.
            platform: 'remote' or 'local' (both use the same loading path).
            non_uniform_sampling: restrict sampling to the bin interior
                instead of sampling uniformly over all points.
            aug_scene: apply a random rotation about +z in train mode.
        """
        super().__init__()
        if dataset_dir is None:
            raise ValueError('dataset_dir must be provided')
        self.platform = platform
        self.use_normal = use_normal
        self.dataset_dir = dataset_dir
        self.transforms = transforms
        self.non_uniform_sampling = non_uniform_sampling
        self.aug_scene = aug_scene

        self.mode, self.num_points = mode, num_points
        if self.mode not in ['train', 'validate', 'test']:
            raise ValueError('No mode {}'.format(self.mode))
        print('dataset mode {}'.format(self.mode))
        if self.mode == 'train':
            self.idxs = np.load(os.path.join(self.dataset_dir, 'train_indices.npy'))
        elif self.mode == 'validate':
            self.idxs = np.load(os.path.join(self.dataset_dir, 'test_indices.npy'))
        else:
            # Test mode enumerates every .npy file found under dataset_dir.
            self.idxs = np.arange(len(self._list_data_files()))

        # In-memory sample cache. NOTE(review): nothing ever writes to it, so
        # every lookup falls through to disk; kept for interface compatibility.
        self.dict_ = {}

    def _list_data_files(self):
        """Return paths of every ``.npy`` file under ``dataset_dir`` (recursive)."""
        file_list = []
        for root, dirs, files in os.walk(self.dataset_dir):
            for file_name in files:
                if file_name.endswith('.npy'):
                    file_list.append(os.path.join(root, file_name))
        return file_list

    def _load_data_file(self, idx):
        """Load one sample from disk.

        Returns:
            train/validate: ``(points[:, :3], per-point label matrix, normals)``.
            test: the raw point array.
        """
        if self.mode in ['train', 'validate']:
            label_dict_path = os.path.join(self.dataset_dir, 'label_{:05d}.npy'.format(idx))
            label_dict = np.load(label_dict_path, allow_pickle=True).item()
            # Regenerate the cloud from the stored depth image with fresh
            # sensor noise on every load (acts as data augmentation).
            point_data = self.perturb_depth(label_dict)
            point_dict = label_dict['point_label']
            grasp_normal_label = label_dict['point_normal']
            if self.use_normal:
                # The normals-as-input path was never wired up downstream
                # (the original code hit `assert 1 == 0` here).
                raise NotImplementedError('use_normal=True is not supported')
            return point_data[:, :3], point_dict, grasp_normal_label
        elif self.mode == 'test':
            return np.load(os.path.join(self.dataset_dir, 'pointcloud_data/xyz_{:05d}.npy'.format(idx)))
        else:
            raise ValueError('No mode {}'.format(self.mode))

    def perturb_depth(self, label_dict):
        """Rebuild a noisy world-frame point cloud from a stored depth image."""
        image_data = label_dict['depth_img']
        camera_intr = label_dict['camera_intr']
        camera_pose = label_dict['camera_pose']
        return generate_label(image_data, camera_intr, camera_pose)

    @staticmethod
    def _bin_sampling_probs(point_data):
        """Per-point sampling probabilities concentrated inside the bin.

        The bin footprint is shrunk by a margin drawn at random from a small
        ladder, so the sampled region varies between samples; points outside
        the shrunk bin get probability zero.
        """
        x_margins = [0, 0.01, 0.02, 0.03, 0.04, 0.05]
        y_margins = [0, 0.015, 0.026, 0.037, 0.048, 0.059]
        k = np.random.randint(0, len(x_margins))
        inside = np.logical_and(abs(point_data[:, 1]) < (0.225 - y_margins[k]),
                                abs(point_data[:, 0]) < (0.275 - x_margins[k]))
        # Uniform probability over the interior, zero elsewhere.
        return inside / inside.sum()

    def _choose_points(self, point_data):
        """Pick ``num_points`` indices, optionally biased to the bin interior."""
        n = point_data.shape[0]
        if self.non_uniform_sampling:
            pro = self._bin_sampling_probs(point_data)
            return np.random.choice(n, self.num_points, replace=False, p=pro)
        return np.random.choice(n, self.num_points, replace=False)

    @staticmethod
    def _center(point_data_choice):
        """Zero-mean the xyz columns, keeping any extra feature columns."""
        mean_point = point_data_choice[:, :3].mean(axis=0)
        return np.hstack((point_data_choice[:, :3] - mean_point, point_data_choice[:, 3:]))

    def _grasp_suction_targets(self, grasp_label, suction_label, grasp_normal_label,
                               choices, point_data_choice, rotation_matrix):
        """Build the per-point targets for the sampled points.

        When ``rotation_matrix`` is given (train mode), grasp poses and
        normals are re-expressed relative to the rotated, point-centered
        frame; otherwise (validate mode) they are left in the original frame,
        matching the original implementation.
        """
        # Columns 2:18 store a flattened 4x4 grasp pose; keep the top three
        # rows -> (N, 3, 4) = [R | t].
        transform = grasp_label[choices, 2:18].copy().reshape(-1, 4, 4)[:, :3]
        distance_label = grasp_label[choices, -3]
        if rotation_matrix is not None:
            # Translation relative to its surface point, rotated into the
            # augmented frame; rotation columns rotated likewise.
            transform[:, :, 3] = np.dot(transform[:, :, 3] - point_data_choice, rotation_matrix.T)
            transform[:, :, :3] = np.dot(rotation_matrix, transform[:, :, :3]).transpose((1, 0, 2))
        # In-plane grasp angle from the second column of R, folded to [0, pi).
        angle_ = np.arctan2(transform[:, 1, 1], transform[:, 0, 1])
        angle_[angle_ < 0] += np.pi
        grasp_angle = angle_ / np.pi * 180
        # Approach direction (first column of R) as azimuth/elevation, degrees.
        azimuth_ = np.arctan2(transform[:, 1, 0], transform[:, 0, 0])
        azimuth_[azimuth_ < 0] += (2 * np.pi)
        azimuth_angle = azimuth_ / np.pi * 180
        elevation_ = np.arctan2(-transform[:, 2, 0],
                                np.sqrt(transform[:, 0, 0] ** 2 + transform[:, 1, 0] ** 2))
        elevation_angle = elevation_ / np.pi * 180
        center_offset = grasp_label[choices, -2].copy()
        # NOTE(review): azimuth_ was already shifted non-negative above, so
        # this mask is always empty; the sign flip was probably meant for the
        # raw azimuth. Kept as-is to avoid changing training targets -- confirm.
        center_offset[azimuth_ < 0] *= -1
        grasp_width_new = grasp_label[choices, -1]
        angle_transform = np.stack((distance_label, azimuth_angle, elevation_angle - 15.0,
                                    grasp_label[choices, -5], grasp_angle, grasp_width_new,
                                    center_offset), axis=1)
        grasp_score = grasp_label[choices, 1]
        matrix_transform = transform[:, :3, :3]
        grasp_cls = grasp_label[choices, 0]
        if rotation_matrix is not None:
            normals_label = np.matmul(grasp_normal_label[choices], rotation_matrix.T)
        else:
            normals_label = grasp_normal_label[choices]
        suction_cls = suction_label[choices, 0]
        suction_score = suction_label[choices, 1]
        objectness_label = suction_label[choices, -1]
        return (grasp_cls, grasp_score, angle_transform, matrix_transform,
                distance_label, normals_label, suction_cls, suction_score,
                objectness_label)

    def __getitem__(self, idx):
        """Return ``(points, label_tuple, dataset_idx)`` for sample ``idx``."""
        dataset_idx = self.idxs[idx]
        if self.platform not in ('remote', 'local'):
            raise Exception('do not support platform {}'.format(self.platform))

        if self.mode in ['train', 'validate']:
            # BUGFIX: the cache was queried with ``self.dict_(idx)`` (calling a
            # dict) and the 'local' branch unpacked two values from a
            # three-tuple; both paths now unpack consistently.
            if dataset_idx in self.dict_:
                point_data, label_dict_, grasp_normal_label = self.dict_[dataset_idx]
            else:
                point_data, label_dict_, grasp_normal_label = self._load_data_file(dataset_idx)

            grasp_label = label_dict_[:, :24]
            suction_label = label_dict_[:, 24:]

            if self.mode == 'train':
                if not self.aug_scene:
                    # The original code deliberately crashed (assert False) on
                    # this path; make the limitation explicit.
                    raise NotImplementedError('train mode requires aug_scene=True')
                # Random rotation about +z. Drawn before sampling to preserve
                # the RNG consumption order of the original implementation.
                rotation_angle = np.random.uniform() * 2 * np.pi
                rotation_matrix = d_utils.angle_axis(rotation_angle, np.array([0.0, 0.0, 1.0]))
            else:
                rotation_matrix = None

            choices = self._choose_points(point_data)
            point_data_choice = point_data[choices, :]
            new_point_data = self._center(point_data_choice)
            if rotation_matrix is not None:
                new_point_data = np.matmul(new_point_data, rotation_matrix.T)

            (grasp_cls, grasp_score, angle_transform, matrix_transform,
             distance_label, normals_label, suction_cls, suction_score,
             objectness_label) = self._grasp_suction_targets(
                grasp_label, suction_label, grasp_normal_label,
                choices, point_data_choice, rotation_matrix)

        elif self.mode == 'test':
            if self.non_uniform_sampling:
                # Foreground = points above the table inside a fixed region;
                # most probability mass goes to the background here.
                inside = np.logical_and(abs(point_data[:, 1]) < 0.30,
                                        abs(point_data[:, 0] - 0.475) < 0.175)
                fg = inside & (point_data[:, 2] > 0.005)
                bg = ~fg
                pro = (0.25 / fg.sum()) * fg + (0.75 / bg.sum()) * bg
                choices = np.random.choice(point_data.shape[0], self.num_points, replace=False, p=pro)
            else:
                choices = np.random.choice(point_data.shape[0], self.num_points, replace=False)
            point_data_choice = point_data[choices, :]
            new_point_data = self._center(point_data_choice)
            # BUGFIX: test scenes carry no labels; the original fell through
            # to the cast section below with these names undefined and raised
            # NameError. Return empty placeholders so the output tuple keeps
            # the same structure as train/validate.
            empty = np.array([])
            grasp_cls = grasp_score = distance_label = angle_transform = empty
            matrix_transform = normals_label = empty
            suction_cls = suction_score = objectness_label = empty
        else:
            raise ValueError('NO mode {}'.format(self.mode))

        current_points = new_point_data.copy()  # (N, 3)
        # BUGFIX: np.int was removed in NumPy 1.24; use fixed-width dtypes.
        grasp_cls = grasp_cls.astype(np.int64)  # (N, )
        grasp_score = grasp_score.astype(np.float32)  # (N, )
        distance_label = distance_label.astype(np.float32)  # (N, )
        angle_transform = angle_transform.astype(np.float32)  # (N, 7)
        matrix_transform = matrix_transform.astype(np.float32)  # (N, 3, 3)
        normals_label = normals_label.astype(np.float32)  # (N, 3)
        suction_cls = suction_cls.astype(np.int64)  # (N, )
        suction_score = suction_score.astype(np.float32)  # (N, )
        objectness_label = objectness_label.astype(np.int64)  # (N, )

        if self.transforms is not None:
            current_points = self.transforms(current_points)

        return current_points, (grasp_cls, grasp_score, angle_transform,
                                matrix_transform, distance_label, normals_label,
                                suction_cls, suction_score, objectness_label),\
               dataset_idx

    def __len__(self):
        """Number of samples in the selected split."""
        return len(self.idxs)

    def set_num_points(self, pts):
        """Override the number of points sampled per scene."""
        self.num_points = pts


if __name__ == "__main__":
    # Manual smoke-test / visualization entry point: loads the training split,
    # pulls batches through a DataLoader, and plots the distribution of grasp
    # center offsets over foreground points. Requires a CUDA device, seaborn,
    # and the hard-coded dataset path below.
    import trimesh
    import sys


    def change_ray_color(ray, color):
        """Paint every entity of the trimesh path `ray` with the given RGB color."""
        colors = np.ones((len(ray.entities), 3))
        colors_1 = (colors * color).astype(np.uint8)  # approach
        ray.colors = colors_1

    # Seed all RNGs so the sampled points are reproducible across runs.
    np.random.seed(0)
    random.seed(0)
    torch.manual_seed(0)

    sys.path.append('../')
    from grasping.farthest_points_sampling import fps


    dataset = Grasp_SuctionBindataset(dataset_dir='/home/v-wewei/code/two_stage_pointnet/mask_label_all/',
                         num_points=int(8192*4),
                         transforms=transforms.Compose(
                             [d_utils.PointcloudToTensor(),
                              # d_utils.PointcloudJitter(std=0.0001, clip=0.0001),
                              ]),
                         mode='train', non_uniform_sampling=True)
    print(len(dataset))

    dloader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=1, shuffle=False)
    for i, batch in enumerate(dloader, 0):

        data, labels, dataset_idx = batch

        import time


        # Unpack the label tuple produced by Grasp_SuctionBindataset.__getitem__.
        data = data.cuda(non_blocking=True)
        grasp_cls_label = labels[0].cuda(non_blocking=True)
        grasp_score_label = labels[1].cuda(non_blocking=True)
        grasp_pose_label = labels[2].cuda(non_blocking=True) # width and grasp angle
        matrix_label = labels[3].cuda(non_blocking=True)
        distance_label = labels[4].cuda(non_blocking=True)
        normals_label = labels[5].cuda(non_blocking=True)
        suction_cls_label = labels[6].cuda(non_blocking=True)
        suction_score_label = labels[7].cuda(non_blocking=True)
        objectness_label = labels[8].cuda(non_blocking=True)




        torch.cuda.synchronize()
        time_start = time.time()

        torch.cuda.synchronize()

        # Foreground masks: points whose grasp / suction class is positive.
        grasp_fg_mask = (grasp_cls_label.view(-1) > 0)

        suction_fg_mask = (suction_cls_label.view(-1) > 0)
        data = data.cpu().numpy().squeeze()

        distance_label = distance_label.detach().cpu().numpy().squeeze()

        grasp_fg_mask = grasp_fg_mask.cpu()
        suction_fg_mask = suction_fg_mask.cpu()

        pointcloud_ori = trimesh.PointCloud(data, colors=[0, 255, 0])

        pointcloud_fg = trimesh.PointCloud(data[grasp_fg_mask], colors=[255, 0, 0])
        pointcloud_fg_suction = trimesh.PointCloud(data[suction_fg_mask], colors=[255, 0, 0])
        # Visualize every 100th point's normal as a short ray segment.
        ray_origins = data[0:16300:100]
        normals_label = normals_label.cpu().numpy().squeeze()
        ray_directions = normals_label[0:16300:100]
        vis_path = np.hstack((ray_origins, ray_origins+ray_directions/100)).reshape(-1, 2, 3)
        ray_visualize = trimesh.load_path(vis_path)

        scene = trimesh.Scene()
        matrix_label = matrix_label.cpu().numpy().squeeze()

        grasp_pose_label = grasp_pose_label.cpu().numpy().squeeze()
        print(grasp_pose_label.shape)
        center_offset = grasp_pose_label[:,-1]
        grasp_width_new = grasp_pose_label[:,-2]
        print(grasp_width_new.shape)
        print('***********', center_offset.max(),center_offset.min(),'***********')

        import seaborn as sns
        import matplotlib.pyplot as plt

        # plt.figure(dpi=120)
        sns.set(style='dark')
        sns.set_style("dark", {"axes.facecolor": "#e9f3ea"})  # change the background color
        g = sns.displot(center_offset[grasp_fg_mask],
                        # hist=True,  # histogram is drawn by default; see plt.hist for details
                        #  bins=50,
                         kde=False,
                         color="#098154")  # change the bar color
        plt.show()
        exit()
        # # print(matrix_label.shape)
        #
        # # print(distance_label[grasp_fg_mask])
        # for i in range(40):
        #     center = data[grasp_fg_mask][i] + matrix_label[:,:,0][grasp_fg_mask][i] * distance_label[grasp_fg_mask][i]
        #     offset_center = center + matrix_label[:, :, 1][grasp_fg_mask][i] * center_offset[grasp_fg_mask][i]
        #     grasp_axis = matrix_label[:,:,1][grasp_fg_mask][i]
        #     grasp_width = grasp_pose_label[:, 3][grasp_fg_mask][i]
        #     grasp_width_new_ = grasp_width_new[grasp_fg_mask][i]
        #     # print(grasp_width)
        #     # print(grasp_width_new_)
        #     # print(center_offset[grasp_fg_mask][i])
        #     # exit()
        #     vis_close_v1 = np.hstack((center, center - grasp_axis * grasp_width/2)).reshape(-1, 2, 3)
        #     vis_close_v2 = np.hstack((center, center + grasp_axis * grasp_width/2)).reshape(-1, 2, 3)
        #     vis_close_v1_ = np.hstack((offset_center, offset_center - grasp_axis * grasp_width_new_/2)).reshape(-1, 2, 3)
        #     vis_close_v2_ = np.hstack((offset_center, offset_center + grasp_axis * grasp_width_new_/2)).reshape(-1, 2, 3)
        #     ray_visualize_c1 = trimesh.load_path(vis_close_v1)
        #     ray_visualize_c2 = trimesh.load_path(vis_close_v2)
        #     ray_visualize_c1_ = trimesh.load_path(vis_close_v1_)
        #     ray_visualize_c2_ = trimesh.load_path(vis_close_v2_)
        #     change_ray_color(ray_visualize_c1_,color=[255, 0, 255])
        #     change_ray_color(ray_visualize_c2_, color=[255, 0, 255])
        #     pc_center = trimesh.PointCloud(center.reshape(1,3),colors=[255, 255, 0])
        #     pc_center_ = trimesh.PointCloud(offset_center.reshape(1, 3), colors=[0, 0, 200])
        #     scene.add_geometry(ray_visualize_c1)
        #     scene.add_geometry(ray_visualize_c2)
        #     scene.add_geometry(pc_center)
        #     scene.add_geometry(pc_center_)
        #     # scene.add_geometry(ray_visualize_c1_)
        #     # scene.add_geometry(ray_visualize_c2_)
        #
        # scene.add_geometry(pointcloud_ori)
        # scene.add_geometry(pointcloud_fg)
        # scene.add_geometry(ray_visualize)
        #
        # scene.show()
        #
        # scene2 = trimesh.Scene([pointcloud_ori,pointcloud_fg_suction,ray_visualize])
        # scene2.show()