import torch
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import sys
import transforms3d as t3d
import open3d as o3d

sys.path.append('../')
import dataset.data_utils as d_utils
from dataset.BindatasetLoader import Bindataset
from models.two_stage_graspnet import Two_Stage_GraspNet
from models.pointnet2_msg_sem import PointNet2SemSegMSG
from models.roi_head import RoI_Head
# from common.utils.config import merge_cfg_into_cfg
from common.lib.metric import AverageMeter
from common.lib.bbox import decode_bbox_target, angle_to_vector, grasp_angle_to_vector, rotation_from_vector
from utils.pointnet2_utils import furthest_point_sample, gather_operation
import trimesh
import yaml
import os
import trimesh.viewer
import pyglet
import glooey
import torch.backends.cudnn as cudnn
import random

# --- Reproducibility / device setup ----------------------------------------
# Pin the process to GPU 0 and seed every RNG the pipeline touches so that
# repeated runs produce identical sampling and visualization results.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
cudnn.benchmark = False
# BUGFIX: was `cudnn.determinstic` (typo) — that silently created a meaningless
# attribute on the module and left cuDNN free to select non-deterministic
# kernels, defeating the seeding above.
cudnn.deterministic = True


class Application:
    """Two-pane viewer: shows two trimesh scenes side by side in one window.

    The constructor builds the window, embeds both scenes as glooey scene
    widgets, and then blocks inside the pyglet event loop until the window
    is closed (press 'Q' with no modifier keys).
    """

    def __init__(self, scene_1, scene_2):
        # Window sized for two 920-px panes; 5 px padding between them.
        self.width, self.height = 920 * 2, 960
        win = self._create_window(width=self.width, height=self.height)

        gui = glooey.Gui(win)
        row = glooey.HBox()
        row.set_padding(5)

        # Left pane: first scene (camera can be moved interactively).
        self.scene_widget1 = trimesh.viewer.SceneWidget(scene_1)
        row.add(self.scene_widget1)

        # Right pane: second scene.
        self.scene_widget2 = trimesh.viewer.SceneWidget(scene_2)
        row.add(self.scene_widget2)

        gui.add(row)

        # Blocks until the window is closed.
        pyglet.app.run()

    def _create_window(self, width, height):
        """Create a pyglet window, preferring a multisampled GL config."""
        try:
            cfg = pyglet.gl.Config(sample_buffers=1,
                                   samples=4,
                                   depth_size=24,
                                   double_buffer=True)
            win = pyglet.window.Window(config=cfg, width=width, height=height)
        except pyglet.window.NoSuchConfigException:
            # Fall back to a plain double-buffered context when MSAA is
            # not available on this display.
            cfg = pyglet.gl.Config(double_buffer=True)
            win = pyglet.window.Window(config=cfg, width=width, height=height)

        @win.event
        def on_key_press(symbol, modifiers):
            # Plain 'Q' (no modifier keys) closes the window.
            if modifiers == 0 and symbol == pyglet.window.key.Q:
                win.close()

        return win




# 0.041  0.04  0.018
# 0.041  0.04  0.018
def draw_box(center, R, l=0.041, w=0.04, h=0.018):
    """Build a trimesh Path3D outlining a batch of gripper boxes.

    center : (B, 3) box reference points.
    R      : (B, 3, 3) rotation matrices (columns = box x/y/z axes).
    l, w, h: box length / width / height in meters.
    """
    # Eight corner offsets in the box frame; the x extent starts 4 mm
    # behind the reference point and extends to -(l - 0.004).
    xs = [0.004] * 2 + [-(l - 0.004)] * 2 + [0.004] * 2 + [-(l - 0.004)] * 2
    ys = [w / 2, -w / 2, -w / 2, w / 2] * 2
    zs = [h / 2] * 4 + [-h / 2] * 4

    local = np.vstack([xs, ys, zs])                       # (3, 8)
    rotated = np.dot(R, local)                            # (B, 3, 8)
    corners = (rotated + np.expand_dims(center, -1)).transpose(0, 2, 1)  # (B, 8, 3)

    # Segment origins: four corners, each reused for three edge directions.
    origins = np.array([corners[:, 0], corners[:, 2],
                        corners[:, 5], corners[:, 7]] * 3)

    x_axis = R[:, :, 0]
    y_axis = R[:, :, 1]
    z_axis = R[:, :, 2]
    directions = np.array(
        [-w * y_axis, w * y_axis, w * y_axis, -w * y_axis,
         -l * x_axis, l * x_axis, -l * x_axis, l * x_axis,
         -h * z_axis, -h * z_axis, h * z_axis, h * z_axis])

    # Pack (origin, origin + direction) pairs into Path3D line segments.
    segments = np.hstack((origins.reshape(-1, 3),
                          (origins + directions).reshape(-1, 3))).reshape(-1, 2, 3)
    return trimesh.load_path(segments)



def draw_box2(center, R, l=0.03, w=0.035, h=0.032):
    """Return the 8 rotated+translated box corners, shape (B, 8, 3).

    Same corner layout as draw_box (x starts 4 mm behind the reference
    point), but returns the raw corner array instead of a line path.
    """
    half_w, half_h = w / 2, h / 2
    near, far = 0.004, -(l - 0.004)
    corners_local = np.vstack([
        [near, near, far, far, near, near, far, far],
        [half_w, -half_w, -half_w, half_w, half_w, -half_w, -half_w, half_w],
        [half_h, half_h, half_h, half_h, -half_h, -half_h, -half_h, -half_h],
    ])
    # (B,3,3) . (3,8) -> (B,3,8); add (B,3,1) centers; transpose -> (B,8,3)
    rotated = np.dot(R, corners_local)
    translated = rotated + np.expand_dims(center, -1)
    return translated.transpose(0, 2, 1)


def bounding_box(bound, color):
    """Build an open3d LineSet wireframe from 8 box corner points.

    bound : sequence of 8 corner points (ordering as produced by draw_box2).
    color : RGB triple applied uniformly to every edge.
    """
    # Corners, kept in the caller's ordering.
    points = [bound[i] for i in range(8)]

    # 12 box edges: top face, four verticals, bottom face.
    edges = [[0, 1], [1, 2], [2, 3], [3, 0],
             [1, 5], [2, 6], [3, 7], [0, 4],
             [4, 5], [5, 6], [6, 7], [7, 4]]

    line_set = o3d.LineSet()
    line_set.points = o3d.Vector3dVector(points)
    line_set.lines = o3d.Vector2iVector(edges)
    line_set.colors = o3d.Vector3dVector([color] * len(edges))

    return line_set


# --- Gripper geometry (meters) ---------------------------------------------
# These four values are consumed by draw_box_gpd() and
# check_collision_square() below to define the gripper closing region.
hh = 0.015  # self.gripper.hand_height
fw = 0.011  # self.gripper.finger_width
hod = 0.072  # self.gripper.hand_outer_diameter
hd = 0.043  # self.gripper.hand_depth


# --- Sampling / visualization configuration --------------------------------
# NOTE(review): none of the constants below are referenced in this file
# except draw_all_box; presumably they are kept for parity with a GPD-style
# sampling script — verify before removing.
value_fc = 0.4  # no use, set a random number
num_grasps = 40
num_workers = 20
max_num_samples = 150
n_voxel = 500

minimal_points_send_to_point_net = 20
marker_life_time = 8

show_bad_grasp = False
save_grasp_related_file = False
input_points_num = 500

draw_all_box = True

def draw_box_gpd(center, R, l=hd, w = hod-2*fw, h=hh): #hd hh width
    """Build a Path3D outline of the gripper closing region (GPD style).

    Unlike draw_box, the box starts exactly at the reference point (no
    4 mm offset) and R is a single (3, 3) rotation, batched internally.
    Defaults come from the gripper constants above (hand depth / opening
    width / hand height).
    """
    half_w, half_h = w / 2, h / 2
    local = np.vstack([
        [0, 0, l, l, 0, 0, l, l],
        [half_w, -half_w, -half_w, half_w, half_w, -half_w, -half_w, half_w],
        [half_h, half_h, half_h, half_h, -half_h, -half_h, -half_h, -half_h],
    ])

    R = np.expand_dims(R, 0)                        # (1, 3, 3) batch of one
    corners = np.dot(R, local) + np.expand_dims(center, -1)
    corners = corners.transpose(0, 2, 1)            # (1, 8, 3)

    # Four corners reused as segment origins for the three edge directions.
    origins = np.array([corners[:, 0], corners[:, 2],
                        corners[:, 5], corners[:, 7]] * 3)
    x_axis = R[:, :, 0]
    y_axis = R[:, :, 1]
    z_axis = R[:, :, 2]
    directions = np.array(
        [-w * y_axis, w * y_axis, w * y_axis, -w * y_axis,
         l * x_axis, -l * x_axis, l * x_axis, -l * x_axis,
         -h * z_axis, -h * z_axis, h * z_axis, h * z_axis])

    segments = np.hstack((origins.reshape(-1, 3),
                          (origins + directions).reshape(-1, 3))).reshape(-1, 2, 3)
    return trimesh.load_path(segments)



def check_collision_square(grasp_bottom_center, approach_normal, binormal,
                           minor_pc, points_, color_,Rz,width_):
    """Find cloud points inside one grasp's closing region and visualize it.

    Transforms the cloud into the gripper frame (approach/binormal/minor
    axes rotated by Rz), tests each point against the closing-box limits,
    and opens a blocking trimesh SceneViewer showing the box plus the
    points inside it.

    Returns (has_p, points_in_area, points_g, scene1):
      has_p          -- True if at least one point lies inside the box.
      points_in_area -- indices of those points in points_.
      points_g       -- the full cloud expressed in the gripper frame.
      scene1         -- the trimesh Scene built for visualization.
    """
    approach_normal = approach_normal.reshape(1, 3)
    #approach_normal = approach_normal / np.linalg.norm(approach_normal)
    binormal = binormal.reshape(1, 3)
    #binormal = binormal / np.linalg.norm(binormal)
    minor_pc = minor_pc.reshape(1, 3)
    #minor_pc = minor_pc / np.linalg.norm(minor_pc)
    # Gripper frame: columns are approach / binormal / minor axes.
    matrix_ = np.hstack([approach_normal.T, binormal.T, minor_pc.T])

    Rz_ = Rz
    # Rotate the gripper frame by Rz, then transpose to map world -> gripper.
    grasp_matrix = np.dot(Rz_,matrix_)
    grasp_matrix = grasp_matrix.T
    # Keep an untranslated copy for visualization before recentering.
    point_cloud = points_.copy()
    points_ = points_ - grasp_bottom_center.reshape(1, 3)
    tmp = np.dot(grasp_matrix, points_.T)
    points_g = tmp.T
    color_path = color_
    if True:
        #width = ags.gripper.hand_outer_diameter - 2 * ags.gripper.finger_width
        # NOTE(review): width_ is used as a scalar here, but collect_pc()
        # passes the whole width array — this only behaves as intended when
        # there is a single grasp (the squeeze yields a 0-d value). Verify.
        width = width_
        # Closing-region limits in the gripper frame (hd/hh are the module
        # constants for hand depth / hand height).
        x_limit = hd
        z_limit = hh / 2
        y_limit = width / 2
        x1 = points_g[:, 0] > 0
        x2 = points_g[:, 0] < x_limit
        y1 = points_g[:, 1] > -y_limit
        y2 = points_g[:, 1] < y_limit
        z1 = points_g[:, 2] > -z_limit
        z2 = points_g[:, 2] < z_limit
        # A point is inside iff all six half-space tests pass.
        a = np.vstack([x1, x2, y1, y2, z1, z2])
        points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
        if len(points_in_area) == 0:
            has_p = False
        else:
            has_p = True
        matrix_ =  np.dot(Rz_,matrix_)
        print("matrix_", matrix_)
        path =draw_box_gpd(grasp_bottom_center.copy(),matrix_.copy(),w = width_)

        grasp_bottom_center= np.expand_dims(grasp_bottom_center,0)

        # stack rays into line segments for visualization as Path3D
        # Three axis rays drawn from the grasp bottom center, scaled to the
        # gripper's depth / half-width / half-height.
        ray_origins = grasp_bottom_center
        ray_directions =  matrix_[:,0] * hd #approach_normal * hd
        ray_directions2 = matrix_[:,1] * width_/2 #((hod-2*fw)/2) #binormal *((hod-2*fw)/2)
        ray_directions3 = matrix_[:,2] * (hh/2) #minor_pc * (hh/2)
        ray_visualize = trimesh.load_path(np.hstack((
                ray_origins,
                ray_origins + ray_directions)).reshape(-1, 2, 3))
        ray_visualize2 = trimesh.load_path(np.hstack((
                ray_origins,
                ray_origins + ray_directions2)).reshape(-1, 2, 3))
        ray_visualize3 = trimesh.load_path(np.hstack((
                ray_origins,
                ray_origins + ray_directions3)).reshape(-1, 2, 3))

        pc = trimesh.PointCloud(point_cloud, colors=[0, 255, 0])

        # Color the three axis rays red / yellow / blue.
        # NOTE(review): colors2 and colors3 are sized from ray_visualize's
        # entities rather than ray_visualize2/3's — harmless only if all
        # three paths have the same entity count. Verify.
        colors = np.ones((len(ray_visualize.entities), 3))
        colors_1 = (colors * [255, 0, 0]).astype(np.uint8)
        ray_visualize.colors = colors_1
        colors2 = np.ones((len(ray_visualize.entities), 3))
        colors_2 = (colors2 * [255, 255, 51]).astype(np.uint8)
        ray_visualize2.colors = colors_2
        colors3 = np.ones((len(ray_visualize.entities), 3))
        colors_3 = (colors3 * [0, 0, 255]).astype(np.uint8)
        ray_visualize3.colors = colors_3
        colors4 = np.ones((len(path.entities), 3))
        colors_4 = (colors4 * color_path).astype(np.uint8)
        path.colors = colors_4



        #draw stage1_path



        scene1 = trimesh.Scene()

        #scene1.add_geometry(pc)
        #scene1.add_geometry(ray_visualize)
        #scene1.add_geometry(ray_visualize2)
        #scene1.add_geometry(ray_visualize3)
        scene1.add_geometry(path)
        inside_point = trimesh.PointCloud(point_cloud[points_in_area], colors=[254, 129, 125])
        scene1.add_geometry(inside_point)
        # Opens a blocking viewer window for this single grasp.
        trimesh.viewer.SceneViewer(scene1, line_settings={'point_size': 17,'line_width':17})
        #scene1.show()

    return has_p, points_in_area, points_g, scene1

def collect_pc(grasp_, pc, color_, Rz):
    """For each grasp, collect the cloud points inside its closing region.

    grasp_ rows are 13-dim: [bottom_center(3), approach(3), binormal(3),
    width(1), minor_pc(3)].  Returns, per grasp, the index array of points
    inside the region, those points expressed in the gripper frame, and
    the visualization scene built by check_collision_square.
    """
    n_grasps = len(grasp_)
    grasps = np.array(grasp_)

    approaches = grasps[:, [3, 4, 5]].copy()
    bottom_centers = grasps[:, [0, 1, 2]].copy()
    centers = bottom_centers  # + approaches * hd
    binormals = grasps[:, [6, 7, 8]].copy()
    minors = grasps[:, [10, 11, 12]].copy()
    widths = grasps[:, 9].squeeze()
    print("width", widths.shape)

    in_inds = []
    in_ind_points = []
    # p = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
    scenes = []
    for k in range(n_grasps):
        # NOTE(review): the full width array is passed (not widths[k]),
        # matching the original behavior — verify intent for n_grasps > 1.
        _, idx_k, pts_g, scene_k = check_collision_square(
            centers[k], approaches[k], binormals[k], minors[k],
            pc, color_, Rz, widths)
        in_inds.append(idx_k)
        in_ind_points.append(pts_g[idx_k])
        scenes.append(scene_k)

    return in_inds, in_ind_points, scenes

if __name__ == '__main__':
    # Entry point: load the two-stage grasp network checkpoint, run it over
    # the validation set, and interactively visualize hard-coded example
    # grasps (closing region + in-region points) against the point cloud.
    b = 1
    mode = 'validate'
    from common.utils.config_ import cfg, cfg_from_yaml_file, merge_new_config_file

    # Layered config: base YAML overridden by the local MSG classification YAML.
    config = cfg_from_yaml_file('../experiments/base_config.yaml', cfg)
    config = merge_new_config_file(config, '../experiments/local_msg_classification_config.yaml')
    # config = merge_cfg_into_cfg('../experiments/local_msg_classification_config.yaml', '../experiments/base_config.yaml')
    # When uniform sampling is used, oversample now and FPS-downsample later
    # in the loop (see the furthest_point_sample branch below).
    if not config.non_uniform_sampling:
        config.num_points *= config.sample_rate
    val_dataset = Bindataset(
        dataset_dir='/home/v-wewei/code/two_stage_pointnet/mask_label_test/',
        # dataset_dir='/home/v-wewei/VV_grasp_dataset/',
        num_points=int(config.num_points),
        transforms=transforms.Compose([
            d_utils.PointcloudToTensor(),
            # d_utils.PointcloudJitter(std=0.0001, clip=0.0001),
        ]),
        mode=mode,
        use_normal=config.use_normal,
        platform=config.platform,
        # non_uniform_sampling=False,
        aug_scene=False
    )

    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=b, shuffle=False,
                                             drop_last=False, num_workers=11 if b > 1 else 0, pin_memory=True)

    # SECURITY/NOTE(review): eval() instantiates whatever class name the YAML
    # provides — acceptable for trusted local configs only.
    model = eval(config.model)(config)
    # model = PointNet2SemSegMSG(config)
    # pretrained_dict = torch.load('{}'.format('/home/v-wewei/code/checkpoints/test_FocalLoss_018_0.pth.tar'), map_location='cpu')
    # Single checkpoint (epoch 29); the range form is kept for easy sweeping.
    for i in range(29, 30):
        pretrained_dict = torch.load(
            '/home/v-wewei/code/two_stage_pointnet/checkpoints/test_two_stage_loss_{:03d}_0.pth.tar'.format(i),
            map_location='cpu')
        model.load_state_dict(pretrained_dict['state_dict'], strict=True)

        model.cuda()
        model.eval()
        ratio = AverageMeter()
        recall = AverageMeter()

        with torch.no_grad():
            # NOTE(review): loop variable i shadows the checkpoint index above.
            for i, batch in enumerate(val_loader):
                #            batch = val_loader[0]
                if True:
                    data, label, dataset_idx = batch
                    print('dataset_idx is : ', dataset_idx)
                    data = data.cuda(non_blocking=True)
                    # label = (cls, score, pose, rotation-matrix) targets.
                    cls_label = label[0].cuda(non_blocking=True)
                    score_label = label[1].cuda(non_blocking=True)
                    pose_label = label[2].cuda(non_blocking=True)
                    matrix_label = label[3].cuda(non_blocking=True)
                    if not config.non_uniform_sampling:
                        print('non uniform_sampling')
                        # FPS-downsample the oversampled cloud back to the
                        # configured point budget, gathering labels to match.
                        idx = furthest_point_sample(data, int(config.num_points / config.sample_rate))
                        data = gather_operation(
                            data.transpose(1, 2).contiguous(),
                            idx).transpose(1, 2).contiguous()
                        cls_label = gather_operation(
                            cls_label.float().unsqueeze(dim=2).transpose(1, 2).contiguous(),
                            idx).squeeze().long().contiguous()
                        pose_label = gather_operation(
                            pose_label.transpose(1, 2).contiguous(),
                            idx).transpose(1, 2).contiguous()
                    # z_vec = z_vec.cuda(non_blocking=True)
                    fg_mask = (cls_label.view(-1) > 0)
                    # print('fg_sum is:', fg_mask.long().sum())
                    fg_mask_ = (cls_label.squeeze() > 0).long()
                    kwargs = {'cls_label': cls_label, 'pose_label': pose_label, 'score_label': score_label,
                              'matrix_label': matrix_label, 'mode': mode}
                    batch_dict = model(data, **kwargs)
                    # Indices of RoIs kept by the model's selection stage.
                    selected_box_inds = batch_dict['selected'].detach().cpu().numpy()
                    print(selected_box_inds)
                    pose_label = pose_label.detach().cpu().numpy().squeeze()

                    data_ = data.detach().cpu().numpy().squeeze()
                    # print('data shape is : ', data_.shape)
                    matrix_label = matrix_label.detach().cpu().numpy().squeeze()[selected_box_inds]
                    cls_label = cls_label.detach().cpu().numpy().squeeze()
                    cls_pred = batch_dict['batch_cls_preds'].detach().cpu().numpy().squeeze()
                    # Stage-1 proposals, stage-2 refinements, and GT grasps.
                    pose_pred = batch_dict['batch_grasp_preds']
                    pose_pred2 = batch_dict['new_batch_grasp_preds']
                    pose_pred3 = batch_dict['gt_grasps']
                    #  print(batch_dict.keys())

                    # Column 9 of a grasp row is its width (see collect_pc).
                    rois_width = batch_dict['rois'].detach().cpu().numpy().squeeze()[:, 9]
                    new_grasp_width = batch_dict['new_batch_grasp_preds'].detach().cpu().numpy().squeeze()[:, 9]
                    gt_width = pose_label[:, 6]
                    selected_index = batch_dict['selected'].cpu().numpy()
                    #non_zero_index = np.where(gt_width[selected_index] > 0)


                    real_grasp = batch_dict['gt_grasps'].detach().cpu().numpy().squeeze()
                    stage1_grasp = batch_dict['batch_grasp_preds'].detach().cpu().numpy().squeeze()
                    stage2_grasp = batch_dict['new_batch_grasp_preds'].detach().cpu().numpy().squeeze()
                    stage1_grasp = stage1_grasp[selected_box_inds]
                    #stage2_grasp = stage2_grasp[selected_box_inds]
                    print(real_grasp.shape,stage1_grasp.shape)



                    ori_points = batch_dict['ori_points'].squeeze().detach().cpu().numpy()
                    ori_points = ori_points #+ [-0.01,-0.01,-0.04]
                    pointcloud_ori = trimesh.PointCloud(ori_points, colors=[90 / 255, 166 / 255, 119 / 255])

                    # NOTE(review): from here the predicted grasps are
                    # discarded — stage1_grasp_ / stage2_grasp_ are hard-coded
                    # demo grasps (center(3), approach(3), binormal(3),
                    # width, minor_pc(3)) used purely for the figure.
                    stage1_grasp = stage1_grasp[:1]
                    stage1_grasp_ = np.zeros_like(stage1_grasp)
                    # -0.09,-0.028    /-0.1007
                    stage1_grasp_ = stage1_grasp_ +[-0.09,-0.028,0.02,
                                                    0,0,-1,
                                                    0,1,0,
                                                    0.065,
                                                    1,0,0
                                                    ]
                    origin, xaxis, yaxis, zaxis = [0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]
                    # +15 degrees about z for the stage-1 demo grasp.
                    gamma = np.pi/12
                    Rz_ = trimesh.transformations.rotation_matrix(gamma, zaxis)
                    Rz1 = Rz_[:3,:3]

                    stage2_grasp_ = np.zeros_like(stage1_grasp)
                    stage2_grasp_ = stage2_grasp_ +[-0.1007,-0.028,0.02,
                                                    0,0,-1,
                                                    0,1,0,
                                                    0.04,
                                                    1,0,0
                                                    ]
                    print(Rz1)
                    print(stage1_grasp_.shape)
                    #Rz2 = np.identity(3)
                    # -15 degrees about z for the stage-2 demo grasp.
                    gamma2 = - np.pi / 12
                    Rz2_ = trimesh.transformations.rotation_matrix(gamma2, zaxis)
                    Rz2 = Rz2_[:3, :3]

                    # +30 degrees about y for the GPD-style comparison grasp.
                    gamma_gpd =  np.pi / 6
                    R_gpd = trimesh.transformations.rotation_matrix(gamma_gpd, yaxis)
                    R_gpd_ = R_gpd[:3, :3]

                    # Opens one blocking viewer per grasp inside collect_pc.
                    in_ind, in_ind_points, scene_list1 = collect_pc(stage1_grasp_, data_,color_=[56,89,137],Rz = Rz1)
                    #in_ind2, in_ind_points2, scene_list2 = collect_pc(stage2_grasp_, data_,color_=[247,144,61],Rz = Rz2)
                    #in_ind3, in_ind_points3, scene_list3 = collect_pc(stage2_grasp_, data_, color_=[56,89,137], Rz=R_gpd_)
                    #pc_gpd = trimesh.PointCloud(ori_points[in_ind3],colors=[254, 129, 125])
                    pc_ours_1 = trimesh.PointCloud(ori_points[in_ind],colors=[254, 129, 125])
                    #pc_ours_2 = trimesh.PointCloud(ori_points[in_ind2],colors=[254, 129, 125])
                    scene_list = scene_list1 #+ scene_list2
                    #scene_list = scene_list3
                    scene_ori = trimesh.Scene()
                    pointcloud_data = trimesh.PointCloud(data_, colors=[90 / 255, 166 / 255, 119 / 255])
                    scene_ori.add_geometry(pointcloud_ori)
                    scene_ori.add_geometry(pc_ours_1)
                    # Hard-coded 4x4 object pose for the demo mesh below.
                    matrix_obj =[4.07309678e-02 ,7.65187538e-01 , 6.42517719e-01, - 1.10135697e-01,
                                 -4.80094848e-02 , 6.43807397e-01 ,- 7.63679989e-01, - 2.23687422e-02,
                                 -9.98016071e-01,2.58480372e-04,6.29591578e-02,4.35433760e-02,
                                 0.00000000e+00 , 0.00000000e+00 , 0.00000000e+00 , 1.00000000e+00]

                    matrix_obj = np.array(matrix_obj).reshape(4,4)
                    print(matrix_obj.shape)
                    mesh = trimesh.load_mesh("/home/v-wewei/test_stl/Pony_800_tex.stl")
                    T = trimesh.transformations.translation_matrix([0.005, -0.005, -0.068])
                    mesh.apply_transform(matrix_obj)
                    mesh.apply_transform(T)
                    scene_ori.add_geometry(mesh)
                    scene_list.append(scene_ori)

                    # Merge all per-grasp scenes into one shared-world scene
                    # and open the final blocking viewer.
                    scene_all = trimesh.scene.scene.append_scenes(scene_list, common=['world'])
                    trimesh.viewer.SceneViewer(scene_all, line_settings={'point_size': 11, 'line_width': 17})
                    #scene_all.show()
# NOTE: A large block of commented-out legacy visualization code (an older
# variant of the __main__ loop above, kept as a module-level triple-quoted
# string) was removed here. The string expression was evaluated and
# immediately discarded at import time, so it had no runtime effect and
# duplicated the logic above. Retrieve it from version control if needed.


