import torch
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import sys
import transforms3d as t3d
import open3d as o3d


sys.path.append('../')
import dataset.data_utils as d_utils
from dataset.BindatasetLoader import Bindataset
from models.two_stage_graspnet import Two_Stage_GraspNet
from models.pointnet2_msg_sem import PointNet2SemSegMSG
from models.roi_head import RoI_Head
#from common.utils.config import merge_cfg_into_cfg
from common.lib.metric import AverageMeter
from common.lib.bbox import decode_bbox_target, angle_to_vector, grasp_angle_to_vector, rotation_from_vector
from utils.pointnet2_utils import furthest_point_sample, gather_operation
import trimesh
import yaml
import os
import trimesh.viewer
import pyglet
import glooey
import torch.backends.cudnn as cudnn
import random


# Pin all RNG sources and cuDNN behavior so evaluation runs are reproducible.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
cudnn.benchmark = False
# BUG FIX: the attribute was misspelled `determinstic`, which silently created
# a useless module attribute and left cuDNN in non-deterministic mode.
cudnn.deterministic = True

class Application:
    """Two-pane trimesh viewer: shows two scenes side by side in one window.

    Pressing ``Q`` (with no modifier keys held) closes the window.
    """

    def __init__(self, scene_1, scene_2):
        # Window wide enough for two 920-px viewports plus padding.
        self.width, self.height = 920 * 2, 960
        window = self._create_window(width=self.width, height=self.height)

        gui = glooey.Gui(window)

        container = glooey.HBox()
        container.set_padding(5)

        # Left pane: first scene (camera can be moved interactively).
        self.scene_widget1 = trimesh.viewer.SceneWidget(scene_1)
        container.add(self.scene_widget1)

        # Right pane: second scene.
        self.scene_widget2 = trimesh.viewer.SceneWidget(scene_2)
        container.add(self.scene_widget2)

        gui.add(container)

        # Blocks until the window is closed.
        pyglet.app.run()

    def _create_window(self, width, height):
        """Create a pyglet window, preferring an antialiased GL config.

        Falls back to a plain double-buffered context when multisampling
        is not available on this machine.
        """
        try:
            gl_config = pyglet.gl.Config(sample_buffers=1,
                                         samples=4,
                                         depth_size=24,
                                         double_buffer=True)
            window = pyglet.window.Window(config=gl_config,
                                          width=width,
                                          height=height)
        except pyglet.window.NoSuchConfigException:
            gl_config = pyglet.gl.Config(double_buffer=True)
            window = pyglet.window.Window(config=gl_config,
                                          width=width,
                                          height=height)

        @window.event
        def on_key_press(symbol, modifiers):
            # Close on a bare 'Q' key press (no modifiers held).
            if modifiers == 0 and symbol == pyglet.window.key.Q:
                window.close()

        return window

def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy for dense (per-point) classification.

    Args:
        output: logits of shape (B, C, N) — class scores per point.
        target: integer labels of shape (B, N).
        topk: iterable of k values to report.

    Returns:
        (res, pred): `res` is a list of 1-element tensors holding the top-k
        accuracy (percent) for each requested k; `pred` is the (maxk, B*N)
        tensor of predicted class indices, best prediction first.
    """
    with torch.no_grad():
        # Flatten (B, C, N) -> (B*N, C) so every point is one sample.
        output = output.transpose(1, 2)
        output = output.contiguous().view(-1, output.size(-1))
        target = target.view(-1)
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # BUG FIX: `view(-1)` can raise on the non-contiguous tensor that
            # results from `pred.t()` + broadcast `eq` on newer PyTorch;
            # `reshape` handles both contiguous and non-contiguous layouts.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res, pred
 #0.041  0.04  0.018
def draw_box(center, R, l=0.041, w=0.04, h=0.018):
    """Build a trimesh line path visualizing the gripper box(es).

    Args:
        center: (B, 3) box centers.
        R: (B, 3, 3) rotation matrices whose columns are the local x/y/z axes.
        l, w, h: box extents along the local axes (defaults 0.041/0.04/0.018).

    Returns:
        A trimesh path containing 12 line segments per box.
    """
    jaw = 0.004  # jaw-tip offset along the local x axis
    xs = [jaw, jaw, -(l - jaw), -(l - jaw), jaw, jaw, -(l - jaw), -(l - jaw)]
    ys = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
    zs = [h / 2] * 4 + [-h / 2] * 4
    # Rotate the local corners into the world frame, then shift by the centers.
    local_corners = np.vstack([xs, ys, zs])                  # (3, 8)
    world = np.dot(R, local_corners) + np.expand_dims(center, -1)
    world = world.transpose(0, 2, 1)                         # (B, 8, 3)

    # Four anchor corners, each reused for a y-, x- and z-direction segment.
    anchors = np.array([world[:, 0], world[:, 2], world[:, 5], world[:, 7]] * 3)
    ax_x = R[:, :, 0]
    ax_y = R[:, :, 1]
    ax_z = R[:, :, 2]
    rays = np.array([
        -w * ax_y, w * ax_y, w * ax_y, -w * ax_y,
        -l * ax_x, l * ax_x, -l * ax_x, l * ax_x,
        -h * ax_z, -h * ax_z, h * ax_z, h * ax_z,
    ])
    segments = np.hstack((anchors.reshape(-1, 3),
                          (anchors + rays).reshape(-1, 3))).reshape(-1, 2, 3)
    return trimesh.load_path(segments)
    #path.show()
    #exit()


def draw_box2(center, R, l=0.03, w=0.035, h=0.032):
    """Return the 8 world-frame corner points of a gripper box.

    Args:
        center: (3,) or (B, 3) box center(s).
        R: (B, 3, 3) rotation matrices.
        l, w, h: box extents along the local axes (defaults 0.03/0.035/0.032).

    Returns:
        (B, 8, 3) array of corner coordinates (top face first, then bottom).
    """
    jaw = 0.004  # jaw-tip offset along the local x axis
    xs = [jaw, jaw, -(l - jaw), -(l - jaw), jaw, jaw, -(l - jaw), -(l - jaw)]
    ys = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
    zs = [h / 2] * 4 + [-h / 2] * 4
    # Rotate local corners into the world frame, then translate by the center.
    rotated = np.dot(R, np.vstack([xs, ys, zs]))             # (B, 3, 8)
    shifted = rotated + np.expand_dims(center, -1)
    return shifted.transpose(0, 2, 1)                        # (B, 8, 3)

def bounding_box(bound, color):
    """Build an open3d LineSet wireframe from 8 box corner points.

    Args:
        bound: sequence of 8 corner points, ordered as produced by
            `draw_box2` (indices 0-3 one face, 4-7 the opposite face).
        color: RGB triple applied to every edge.

    Returns:
        o3d.LineSet containing the 12 box edges.
    """
    corners = [bound[idx] for idx in range(8)]

    # Four edges per face plus the four connecting edges.
    edges = [[0, 1], [1, 2], [2, 3], [3, 0],
             [1, 5], [2, 6], [3, 7], [0, 4],
             [4, 5], [5, 6], [6, 7], [7, 4]]

    # Legacy (pre-0.8) open3d API: types live at the package top level.
    wireframe = o3d.LineSet()
    wireframe.points = o3d.Vector3dVector(corners)
    wireframe.lines = o3d.Vector2iVector(edges)
    wireframe.colors = o3d.Vector3dVector([color] * len(edges))

    return wireframe
    
# -----------------------------------------------------------------------------
# Evaluation setup: load config, build the validation dataset/loader and model.
# -----------------------------------------------------------------------------
b = 1  # evaluation batch size
mode = 'validate'
from common.utils.config_ import cfg, cfg_from_yaml_file, merge_new_config_file
config = cfg_from_yaml_file('../experiments/base_config.yaml', cfg)
config = merge_new_config_file(config, '../experiments/local_msg_classification_config.yaml')
if not config.non_uniform_sampling:
    # When sampling uniformly we load sample_rate-times more points and later
    # subsample them down with furthest-point sampling in the eval loop.
    config.num_points *= config.sample_rate
val_dataset = Bindataset(
    dataset_dir='/home/v-wewei/code/two_stage_pointnet/mask_label_test/',
    num_points=int(config.num_points),
    transforms=transforms.Compose([
        d_utils.PointcloudToTensor(),
    ]),
    mode=mode,
    use_normal=config.use_normal,
    platform=config.platform,
    aug_scene=False
    )

# Workers only help for real batching; with b == 1 keep loading in-process.
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=b, shuffle=False,
    drop_last=False, num_workers=11 if b > 1 else 0, pin_memory=True)

# BUG FIX / security: resolve the model class by name from this module's
# namespace instead of eval(), which would execute arbitrary code taken from
# the config file.  The classes are imported at the top of this file.
model = globals()[config.model](config)
# Evaluate each checkpoint in the given range on the validation set and
# visualize predicted vs. ground-truth grasps.
# NOTE(review): the loop variable `i` is reused by the inner batch loop, and
# `a`/`b`/`c`/`d` below shadow the module-level batch size `b` — confusing but
# harmless here, since the outer values are consumed before being shadowed.
for i in range(29, 30):
    # Load checkpoint weights on CPU first, then move the model to GPU.
    pretrained_dict = torch.load('/home/v-wewei/code/two_stage_pointnet/checkpoints/test_two_stage_loss_{:03d}_0.pth.tar'.format(i), map_location='cpu')
    model.load_state_dict(pretrained_dict['state_dict'], strict=True)

    model.cuda()
    model.eval()
    ratio = AverageMeter()
    recall = AverageMeter()
    # Per-batch counters: total non-zero ground-truth grasps, and how many
    # predicted widths fall within 2.5 / 5 / 7.5 / 10 mm of the ground truth.
    total_list = []
    list1 = []
    list2 = []
    list3 = []
    list4 = []
    # gt_interval1_total = 0
    # gt_interval2_total = 0
    # gt_interval3_total = 0
    # gt_interval4_total = 0
    # gt_interval5_total = 0
    # gt_interval6_total = 0
    # gt_interval7_total = 0
    # gt_interval8_total = 0
    #
    # pred_interval1_total = 0
    # pred_interval2_total = 0
    # pred_interval3_total = 0
    # pred_interval4_total = 0
    # pred_interval5_total = 0
    # pred_interval6_total = 0
    # pred_interval7_total = 0
    # pred_interval8_total = 0
    with torch.no_grad():
        # NOTE(review): `i` here shadows the checkpoint index above.
        for i, batch in enumerate(val_loader):
#            batch = val_loader[0]
            if True:
                # label layout from BindatasetLoader: 0=class, 1=score,
                # 2=pose, 3=rotation matrix — presumably; confirm in dataset.
                data, label, dataset_idx = batch
                print('dataset_idx is : ', dataset_idx)
                data = data.cuda(non_blocking=True)
                cls_label = label[0].cuda(non_blocking=True)
                score_label = label[1].cuda(non_blocking=True)
                pose_label = label[2].cuda(non_blocking=True)
                matrix_label = label[3].cuda(non_blocking=True)
                if not config.non_uniform_sampling:
                    # Subsample the oversampled cloud with furthest-point
                    # sampling and gather the matching labels.
                    print('non uniform_sampling')
                    idx = furthest_point_sample(data, int(config.num_points/config.sample_rate))
                    data = gather_operation(
                        data.transpose(1, 2).contiguous(),
                        idx).transpose(1, 2).contiguous()
                    cls_label = gather_operation(
                        cls_label.float().unsqueeze(dim=2).transpose(1, 2).contiguous(),
                        idx).squeeze().long().contiguous()
                    pose_label = gather_operation(
                        pose_label.transpose(1, 2).contiguous(),
                        idx).transpose(1, 2).contiguous()
                #z_vec = z_vec.cuda(non_blocking=True)
                fg_mask = (cls_label.view(-1) > 0)
                #print('fg_sum is:', fg_mask.long().sum())
                fg_mask_ = (cls_label.squeeze()>0).long()
                kwargs = {'cls_label': cls_label, 'pose_label': pose_label, 'score_label': score_label, 'matrix_label': matrix_label, 'mode': mode}
                batch_dict = model(data, **kwargs)

                data_ = data.detach().cpu().numpy().squeeze()
                # print('data shape is : ', data_.shape)

                cls_label = cls_label.detach().cpu().numpy().squeeze()
                cls_pred = batch_dict['batch_cls_preds'].detach().cpu().numpy().squeeze()
                # Stage-1 proposals, stage-2 refinements and ground truth.
                pose_pred = batch_dict['batch_grasp_preds']
                pose_pred2 = batch_dict['new_batch_grasp_preds']
                pose_pred3 = batch_dict['gt_grasps']
              #  print(batch_dict.keys())

                # Grasp width lives at column 9 of predictions / rois and at
                # column 6 of the pose label (see the indexing below).
                rois_width = batch_dict['rois'].detach().cpu().numpy().squeeze()[:, 9]
                new_grasp_width = batch_dict['new_batch_grasp_preds'].detach().cpu().numpy().squeeze()[:, 9]
                gt_width = pose_label.detach().cpu().numpy().squeeze()[:, 6]
                selected_index = batch_dict['selected'].cpu().numpy()
                # Only evaluate points whose ground-truth width is non-zero.
                non_zero_index = np.where(gt_width[selected_index]>0)


                # Absolute width error of the refined (stage-2) predictions.
                new_acc = np.abs(gt_width[selected_index][non_zero_index]-new_grasp_width[non_zero_index])
                total_list.append(np.array(non_zero_index).shape[1])
                a = np.array(np.where(new_acc < 0.0025))
                b = np.array(np.where(new_acc < 0.005))
                c = np.array(np.where(new_acc < 0.0075))
                d = np.array(np.where(new_acc < 0.01))
                list1.append(a.shape[1])
                list2.append(b.shape[1])
                list3.append(c.shape[1])
                list4.append(d.shape[1])
    #             # print('finished')
    # percent1 =np.array(list1).sum()/ np.array(total_list).sum()
    # percent2 =np.array(list2).sum()/ np.array(total_list).sum()
    # percent3 =np.array(list3).sum()/ np.array(total_list).sum()
    # percent4 =np.array(list4).sum()/ np.array(total_list).sum()
    # print(percent1,percent2,percent3,percent4,np.array(total_list).sum())
    # print('finish')



                #add by lifuyu 2020.10.12 14:00
 # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
               # print(cls_label,cls_pred)
                #select the Iou foreground_mask_point (condition: predict_score > 0.3 cls_label == 1)
                # condition1 = np.where(cls_pred >= config.seg_thresh)
                # condition2 = np.where(cls_label == 1)
                # select_fg_mask_point_index = np.intersect1d(condition1,condition2)
                # # pc_ =trimesh.PointCloud(data_,colors=[0,255,0])
                # # pc = trimesh.PointCloud(data_[select_fg_mask_point_index],colors=[255,0,0])
                # # scene = trimesh.Scene()
                # # scene.add_geometry(pc_)
                # # scene.add_geometry(pc)
                # #scene.show()
                # #print('1')
                #
                # # get the ground_truth grasp_width
                # gt_width = pose_label.detach().cpu().numpy().squeeze()[:, 6]
                # #print(gt_width.shape)
                #
                # # get the predict_grasp_width
                # pred_width = pose_pred.detach().cpu().numpy().squeeze()[:, 9]
                # #print(pred_width.shape)
                #
                # selected_index = batch_dict['selected'].cpu().numpy()
                # # point_index = np.intersect1d(selected_index,select_fg_mask_point_index)
                # # calculate metric for [0.0025, 0.005, 0.0075] (accuracy)
                # non_zero_index = np.where(gt_width[selected_index]>0)
                # acc = np.abs(gt_width[selected_index][non_zero_index]-pred_width[selected_index][non_zero_index])
                #
                # total_list.append(np.array(non_zero_index).shape[1])
                # a = np.array(np.where(acc < 0.0025))
                # b = np.array(np.where(acc < 0.005))
                # c = np.array(np.where(acc < 0.0075))
                # d = np.array(np.where(acc < 0.01))
                # list1.append(a.shape[1])
                # list2.append(b.shape[1])
                # list3.append(c.shape[1])
                # list4.append(d.shape[1])

#                print(list1)


                # gt_interval1 = np.sum((gt_width[select_fg_mask_point_index]<0.005) & (gt_width[select_fg_mask_point_index] >0))
                # gt_interval2 = np.sum((gt_width[select_fg_mask_point_index]<0.01) &  (gt_width[select_fg_mask_point_index] >=0.005))
                # gt_interval3 = np.sum((gt_width[select_fg_mask_point_index]<0.015) & (gt_width[select_fg_mask_point_index]>=0.01))
                # gt_interval4 = np.sum((gt_width[select_fg_mask_point_index]<0.020) & (gt_width[select_fg_mask_point_index]>=0.015))
                # gt_interval5 = np.sum((gt_width[select_fg_mask_point_index] < 0.025)&(gt_width[select_fg_mask_point_index] >= 0.020))
                # gt_interval6 = np.sum((gt_width[select_fg_mask_point_index] < 0.03) &(gt_width[select_fg_mask_point_index] >= 0.025))
                # gt_interval7 = np.sum((gt_width[select_fg_mask_point_index] < 0.035)&(gt_width[select_fg_mask_point_index] >= 0.030))
                # gt_interval8 = np.sum((gt_width[select_fg_mask_point_index] < 0.04) &(gt_width[select_fg_mask_point_index] >= 0.035))
                #
                #
                # pred_interval1 = np.sum((pred_width[select_fg_mask_point_index]<0.005) &(pred_width[select_fg_mask_point_index] >0))
                # pred_interval2 = np.sum((pred_width[select_fg_mask_point_index]<0.01) & (pred_width[select_fg_mask_point_index] >=0.005))
                # pred_interval3 = np.sum((pred_width[select_fg_mask_point_index]<0.015) &(pred_width[select_fg_mask_point_index] >=0.01))
                # pred_interval4 = np.sum((pred_width[select_fg_mask_point_index]<0.020) &(pred_width[select_fg_mask_point_index] >=0.015))
                # pred_interval5 = np.sum((pred_width[select_fg_mask_point_index]<0.025)& (pred_width[select_fg_mask_point_index] >= 0.020))
                # pred_interval6 = np.sum((pred_width[select_fg_mask_point_index] < 0.03)&(pred_width[select_fg_mask_point_index] >= 0.025))
                # pred_interval7 = np.sum((pred_width[select_fg_mask_point_index] <0.035)&(pred_width[select_fg_mask_point_index] >= 0.030))
                # pred_interval8 = np.sum((pred_width[select_fg_mask_point_index] <0.04) &(pred_width[select_fg_mask_point_index] >= 0.035))
                #
                # gt_interval1_total = gt_interval1_total+gt_interval1
                # gt_interval2_total = gt_interval2_total+gt_interval2
                # gt_interval3_total = gt_interval3_total+gt_interval3
                # gt_interval4_total = gt_interval4_total+gt_interval4
                # gt_interval5_total = gt_interval5_total+gt_interval5
                # gt_interval6_total = gt_interval6_total+gt_interval6
                # gt_interval7_total = gt_interval7_total+gt_interval7
                # gt_interval8_total = gt_interval8_total+gt_interval8
                #
                #
                # pred_interval1_total = pred_interval1_total+pred_interval1
                # pred_interval2_total = pred_interval2_total+pred_interval2
                # pred_interval3_total = pred_interval3_total+pred_interval3
                # pred_interval4_total = pred_interval4_total+pred_interval4
                # pred_interval5_total = pred_interval5_total+pred_interval5
                # pred_interval6_total = pred_interval6_total+pred_interval6
                # pred_interval7_total = pred_interval7_total+pred_interval7
                # pred_interval8_total = pred_interval8_total+pred_interval8

#                print(gt_interval1)

    # percent1 =np.array(list1).sum()/ np.array(total_list).sum()
    # percent2 =np.array(list2).sum()/ np.array(total_list).sum()
    # percent3 =np.array(list3).sum()/ np.array(total_list).sum()
    # percent4 =np.array(list4).sum()/ np.array(total_list).sum()
    # print(percent1,percent2,percent3,percent4)
    # print(np.array(total_list).sum())
    # print('finish')
# #
#
#     list_gt = [gt_interval1_total,gt_interval2_total,gt_interval3_total,gt_interval4_total,gt_interval5_total,gt_interval6_total,
#                gt_interval7_total,gt_interval8_total]
#     list_pred =[pred_interval1_total,
#                 pred_interval2_total,
#                 pred_interval3_total,
#                 pred_interval4_total,
#                 pred_interval5_total,
#                 pred_interval6_total,
#                 pred_interval7_total,
#                 pred_interval8_total]
#     print(list_pred,list_gt)
    # exit()
    # import matplotlib.pyplot as plt
    # import matplotlib
    #
    # # use a CJK-capable font and render minus signs correctly
    # matplotlib.rcParams['font.sans-serif'] = ['SimHei']
    # matplotlib.rcParams['axes.unicode_minus'] = False
    #
    # label_list = ['0-0.005', '0.005-0.01', '0.01-0.015', '0.015-0.02','0.02-0.025','0.025-0.03','0.03-0.035','0.035-0.04']  # x-axis tick labels
    # num_list1 = list_gt  # y values, series 1 (ground truth)
    # num_list2 = list_pred  # y values, series 2 (predictions)
    # x = range(len(num_list1))
    # """
    # draw the bar chart
    # left: bar-center x coordinates
    # height: bar heights
    # width: bar width, default 0.8
    # label: used later by legend()
    # """
    # rects1 = plt.bar(left=x, height=num_list1, width=0.4, alpha=0.8, color='red', label="一部门")
    # rects2 = plt.bar(left=[i + 0.4 for i in x], height=num_list2, width=0.4, color='green', label="二部门")
    # plt.ylim(0, 50)  # y-axis range
    # plt.ylabel("数量")
    # """
    # set x-axis tick labels
    # arg 1: tick center positions
    # arg 2: labels to display
    # """
    # plt.xticks([index + 0.2 for index in x], label_list)
    # plt.xlabel("年份")
    # plt.title("某某公司")
    # plt.legend()  # add the legend
    # # annotate bars with their heights
    # for rect in rects1:
    #     height = rect.get_height()
    #     plt.text(rect.get_x() + rect.get_width() / 2, height + 1, str(height), ha="center", va="bottom")
    # for rect in rects2:
    #     height = rect.get_height()
    #     plt.text(rect.get_x() + rect.get_width() / 2, height + 1, str(height), ha="center", va="bottom")
    # plt.show()
  # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
        #add finished


                show = True
                if show:
                    if mode in ['train', 'validate']:
                        # Ground-truth foreground points red, background green.
                        red = np.where(cls_label ==1)
                        red_ = np.where(cls_label !=1)
                        pointcloud_red = trimesh.PointCloud(data_[red], colors=[255, 0, 0])
                        pointcloud_red_ = trimesh.PointCloud(data_[red_], colors=[0, 255, 0])
                        pose_label = pose_label.detach().cpu().numpy().squeeze()

                   # print('max is :', cls_pred.max())
                   # print('min is :', cls_pred.min())
                    # Predicted foreground (score >= seg_thresh) in blue.
                    blue = np.where(cls_pred >= config.seg_thresh)
                    blue_ = np.where(cls_pred < config.seg_thresh)
                    pointcloud_blue = trimesh.PointCloud(data_[blue], colors=[0, 0, 255])
                    pointcloud_blue_ = trimesh.PointCloud(data_[blue_], colors=[0, 255, 0])

                    point_cloud = trimesh.PointCloud(data_, colors=[0, 255, 0])
                    selected_box_inds = batch_dict['selected'].detach().cpu().numpy()
                    #print(selected_box_inds)
                    #sampled_box_inds = batch_dict['target_sampled_inds'].detach().cpu().numpy()
                    # show points in the first box
                    key_ = 20
                    selected_points = batch_dict['selected_points'].squeeze()[key_].detach().cpu().numpy()
                    #print('selected_points shape is : ', selected_points.shape)
                    ori_points = batch_dict['ori_points'].squeeze().detach().cpu().numpy()
                    pointcloud_selected_points = trimesh.PointCloud(selected_points, colors=[255, 0, 0])
                    pointcloud_ori = trimesh.PointCloud(ori_points,colors=[0,255,0])
                   # print(pose_pred.shape,pose_pred2.shape,pose_pred3.shape)
                    # Grasp vectors store a flattened rotation in columns
                    # [3..8, 10..12]; the transpose restores column-vector axes.
                    R = pose_pred[:, :, [3, 4, 5, 6, 7, 8, 10, 11, 12]].view(-1, 3, 3).transpose(-1, -2).detach().cpu().numpy()[selected_box_inds]
                    #R = R[non_zero_index]
                    R2 = pose_pred2[:, :, [3, 4, 5, 6, 7, 8, 10, 11, 12]].view(-1, 3, 3).transpose(-1,-2).detach().cpu().numpy()
                    #R2 = R2[non_zero_index]
                    R3 = matrix_label.detach().cpu().numpy().squeeze()[selected_box_inds]
                    #R3 = R3[non_zero_index]
                    pose_pred = pose_pred.detach().cpu().numpy().squeeze()
                    pose_pred2 =pose_pred2.detach().cpu().numpy().squeeze()
                    pose_pred3 = pose_pred3.detach().cpu().numpy().squeeze()
#                    print('R',R.shape,R[selected_box_inds].shape)
                    #stage1_center =pose_pred[selected_box_inds][non_zero_index][:, :3]
                    stage1_center = pose_pred[selected_box_inds][:, :3]
                    #stage2_center = pose_pred2[non_zero_index][:,:3]
                    stage2_center = pose_pred2[:, :3]
                    # The pose label stores a per-point center offset, so the
                    # ground-truth center is offset + point coordinate.
                    #gt_center = pose_label[selected_box_inds[non_zero_index], :3] +  data_[selected_box_inds][non_zero_index]
                    gt_center = pose_label[selected_box_inds, :3] + data_[selected_box_inds]
                    pred_width = pose_pred[:, 9]
                    #pred_width2 = pose_pred2[:,9]
                    stage1_w = pred_width[selected_box_inds]
                    #stage1_w = stage1_w[non_zero_index]

                    new_grasp_width = batch_dict['new_batch_grasp_preds'].detach().cpu().numpy().squeeze()[:, 9]
                    new_label = torch.sigmoid(batch_dict['new_batch_cls_preds']).detach().cpu().numpy().squeeze()
                    # Keep only stage-2 grasps with very confident scores.
                    final_index = (new_label > 0.98)
                    #print(final_index)

                    stage2_w = new_grasp_width
                    #stage2_w = stage2_w[non_zero_index]
                    gt_w = gt_width[selected_box_inds]
                    #gt_w = gt_w[non_zero_index]

                    #print(pose_pred[selected_box_inds][:, :3])
                    #R = pose_pred[:, [3, 4, 5, 6, 7, 8, 10, 11, 12]].view(-1, 3, 3).transpose(-1, -2)
                    # path = draw_box(
                    #     pose_pred[selected_box_inds][:, :3],
                    #     R[selected_box_inds],
                    #     #w=targets_dict['rois'][0, 0, 9].detach().cpu().numpy()
                    #     )

                    # Stage-1 proposals (red), confident stage-2 refinements
                    # (yellow) and the ground-truth grasp for key_ (orange).
                    path_1 = draw_box(
                        stage1_center,
                        R,
                        #np.expand_dims(R,),
                        #w=stage1_w[key_]
                        #w=targets_dict['rois'][0, 0, 9].detach().cpu().numpy()
                        )
                    #print(targets_dict['gt_of_rois'][0, 0])
                    #exit()
                    path_2 = draw_box(
                        stage2_center[final_index],
                        R2[final_index],
                        #np.expand_dims(R2, 0),
                        #w=stage2_w[key_]
                    )
                    path_3 = draw_box(
                        gt_center[key_],
                        #R3,
                        np.expand_dims(R3[key_], 0),
                        w = gt_w[key_]
                    )
                    print(R3[:1,:])
                    colors = np.ones((len(path_1.entities), 3))
                    colors2 = np.ones((len(path_2.entities), 3))
                    colors3 = np.ones((len(path_3.entities), 3))
                    colors_1 = (colors * [255, 0, 0]).astype(np.uint8)
                    colors_2 = (colors2 * [255,255, 51]).astype(np.uint8)
                    colors_3 = (colors3 * [247,144,61]).astype(np.uint8)
                    path_1.colors = colors_1
                    path_2.colors = colors_2
                    path_3.colors = colors_3
                    pc_selected_points = trimesh.PointCloud(selected_points,colors=[0,0,255])
                    print('here')
                    # gripper = trimesh.load(
                    #     '/home/v-wewei/code/two_stage_pointnet_bak/new_gripper.stl')
                    #gripper.apply_transform(R3[:1,:])
                    scene_p = trimesh.Scene()
                    scene = trimesh.Scene()
                    scene1 = trimesh.Scene()
                    #scene.add_geometry(point_cloud)
                    scene_p.add_geometry(pointcloud_blue_)
                    scene_p.add_geometry(pointcloud_blue)
                    scene_t = trimesh.Scene()
                    scene_t.add_geometry(pointcloud_red)
                    scene_t.add_geometry(pointcloud_red_)
                    #Application(scene_p, scene_t)
                    #scene.add_geometry(path_1)
                    #scene.add_geometry(path_2)
                    scene.add_geometry(path_3)
                    scene1.add_geometry(path_3)
                    #scene.add_geometry(point_cloud)
                    scene.add_geometry(pointcloud_ori)
                    scene1.add_geometry(pc_selected_points)
                    #scene.add_geometry(gripper)
                    # Bin mesh for scene context (translation aligns it with
                    # the captured cloud); currently not added to the scene.
                    # NOTE(review): `bin` shadows the builtin — rename when
                    # this code is next touched.
                    bin = trimesh.load_mesh('/home/v-wewei/code/two_stage_pointnet/dataset/data/bin/bin.obj')
                    T = trimesh.transformations.translation_matrix([0.007507 , -0.00926 , -0.0653 ])
                    bin.apply_transform(T)
                    bin.visual.face_colors = [190,190,190]
                    #scene.add_geometry(bin)
                    #Application(scene,scene1)
                    scene.show()
                   # scene1.show()
                   # print(dir(o3d))
                    # Legacy (pre-0.8) open3d API: PointCloud/Vector3dVector
                    # live at the package top level.
                    selected_points_o3d = o3d.PointCloud()
                    selected_points_o3d.points = o3d.Vector3dVector(selected_points)
                    #selected_points_o3d.colors = [1, 0, 0]
                    #color = [247/255,144/255,61/255]
                    color = [0,0,0]
                    box_sample = draw_box2(gt_center[key_],
                        #R3,
                        np.expand_dims(R3[key_], 0))
                    box_sample = box_sample.squeeze()
                    object_bbox = bounding_box(box_sample, color)
                    #print(dir(selected_points_o3d))
                    selected_points_o3d.paint_uniform_color([1,91/255,78/255])
                    o3d.visualization.draw_geometries([selected_points_o3d])

#                    #path_1 = draw_box(pose_pred[selected_box_inds][sampled_box_inds][:1], R[selected_box_inds][sampled_box_inds][:1], w=targets_dict['rois'][0, 0, 9].detach().cpu().numpy())
#                    #print(pose_pred[selected_box_inds][sampled_box_inds][:1])
#                    #path_2 = draw_box(np.zeros((1,3)), R=np.identity(3).reshape(-1, 3, 3))
#                    center = pose_label[fg_mask.cpu().numpy()][:, :3]+data_[fg_mask.cpu().numpy()]
#                    print(center.shape)
#                    center_vis = trimesh.PointCloud(center, colors=[255, 255, 0])
#                    center_pred_vis = trimesh.PointCloud(pose_pred[fg_mask.cpu().numpy()][:, :3].squeeze(), colors=[0, 255, 255])
#                    #ray_origins = data_[fg_mask.cpu().numpy()]
#                    #ray_directions = closing_vector_label.cpu().numpy()
#                    #ray_visualize = trimesh.load_path(np.hstack((ray_origins, ray_origins+ray_directions / 50)).reshape(-1, 2, 3))
#
#                    scene_1 = trimesh.Scene()
#                    scene_1.add_geometry(center_vis)
#                    scene_1.add_geometry(center_pred_vis)
#                    #scene_1.add_geometry(path_1)
#                    #scene_1.add_geometry(ray_visualize)
#                   # scene_1.add_geometry(path_2)
#                    scene_1.add_geometry(pointcloud_red_)
#                    scene_1.add_geometry(pointcloud_red)
#                    #scene_1.add_geometry(pointcloud_selected_points)
#                    Application(scene, scene_1)
#                    #scene_1.show()
#                #print(targets_dict.keys())
           # except:
           #  else:
           #      pass






def main(selected_points=None):
    """Visualize a point cloud with the legacy open3d API.

    Args:
        selected_points: (N, 3) array-like of points to display.  Defaults
            to None, in which case nothing is shown and the function returns
            immediately.

    BUG FIX: the original body referenced an undefined `selected_points`
    (NameError on every call), used `o3d.pointcloud()` (wrong case — the
    legacy API class is `o3d.PointCloud`, as used in the evaluation loop
    above), and assigned a plain Python list to `.colors` instead of going
    through `paint_uniform_color` / `Vector3dVector`.
    """
    if selected_points is None:
        print('main(): no point cloud supplied; nothing to visualize.')
        return

    cloud = o3d.PointCloud()
    cloud.points = o3d.Vector3dVector(selected_points)
    cloud.paint_uniform_color([1, 0, 0])
    o3d.visualization.draw_geometries([cloud])

# Script entry point: run the standalone visualization helper.
if __name__ == '__main__':
    main()