import torch
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import sys
sys.path.append('../')
import dataset.data_utils as d_utils
from dataset.BindatasetLoader import Bindataset
#from models.pointnet2_ssg_sem import PointNet2SemSegSSG
from models.pointnet2_msg_sem import PointNet2SemSegMSG
from models.roi_head import RoI_Head
#from common.utils.config import merge_cfg_into_cfg
from common.lib.metric import AverageMeter
from common.lib.bbox import decode_bbox_target, angle_to_vector, grasp_angle_to_vector, rotation_from_vector
from utils.pointnet2_utils import furthest_point_sample, gather_operation
import trimesh
import yaml
import os
import trimesh.viewer
import pyglet
import glooey



os.environ['CUDA_VISIBLE_DEVICES']= '0'

class Application:

    """
    Two-pane trimesh viewer: renders two scenes side by side in one window.

    Construction blocks inside ``pyglet.app.run()`` until the window is
    closed (press the unmodified ``q`` key).
    """

    def __init__(self, scene_1, scene_2):
        # Window sized for two side-by-side panes.
        self.width, self.height = 920 * 2, 960
        window = self._create_window(width=self.width, height=self.height)

        gui = glooey.Gui(window)

        row = glooey.HBox()
        row.set_padding(5)

        # Left pane: camera-controllable view of scene_1.
        self.scene_widget1 = trimesh.viewer.SceneWidget(scene_1)
        # Right pane: view of scene_2.
        self.scene_widget2 = trimesh.viewer.SceneWidget(scene_2)
        row.add(self.scene_widget1)
        row.add(self.scene_widget2)

        gui.add(row)

        # Enter the pyglet event loop; returns when the window closes.
        pyglet.app.run()

    def _create_window(self, width, height):
        """Create a pyglet window, preferring an antialiased GL config."""
        try:
            gl_config = pyglet.gl.Config(sample_buffers=1,
                                         samples=4,
                                         depth_size=24,
                                         double_buffer=True)
            window = pyglet.window.Window(config=gl_config,
                                          width=width,
                                          height=height)
        except pyglet.window.NoSuchConfigException:
            # Multisampled config unavailable — fall back to plain
            # double buffering.
            gl_config = pyglet.gl.Config(double_buffer=True)
            window = pyglet.window.Window(config=gl_config,
                                          width=width,
                                          height=height)

        @window.event
        def on_key_press(symbol, modifiers):
            # Unmodified 'q' closes the window (and ends pyglet.app.run()).
            if modifiers == 0 and symbol == pyglet.window.key.Q:
                window.close()

        return window

def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy for dense (per-point) predictions.

    Args:
        output: logits of shape (B, C, N) — class scores in dim 1; the
            tensor is transposed and flattened to (B*N, C) internally.
        target: integer class labels; any shape flattening to B*N elements.
        topk: iterable of k values to report accuracy for.

    Returns:
        (res, pred): ``res`` is a list with one 1-element tensor per k,
        the top-k accuracy in percent; ``pred`` is the (maxk, B*N) tensor
        of predicted class indices.
    """
    with torch.no_grad():
        # Flatten the dense prediction map to one row of scores per point.
        output = output.transpose(1, 2)
        output = output.contiguous().view(-1, output.size(-1))
        target = target.view(-1)
        maxk = max(topk)
        batch_size = target.size(0)
        # pred: (B*N, maxk) indices of the top-k classes, transposed to
        # (maxk, B*N) so row r holds the rank-r prediction for every point.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape (not view): slices of `correct` are not guaranteed
            # contiguous, and .view raises on newer PyTorch in that case.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res, pred

def draw_box(center, R, l=0.041, w=0.04, h=0.018):
    """Build a trimesh Path3D wireframe for a batch of gripper boxes.

    Args:
        center: (B, 3) array of box centers in world coordinates.
        R: (B, 3, 3) rotation matrices; columns are the box x/y/z axes.
        l: box length along x. The x extent is asymmetric
            ([0.004, -(l-0.004)]) to match the gripper origin offset.
        w: box width along y (symmetric about the center).
        h: box height along z (symmetric about the center).

    Returns:
        A trimesh Path3D containing the 12 edges of every box.
    """
    # Corner offsets in the box frame (asymmetric along x).
    x_corners = [0.004, 0.004, -(l-0.004), -(l-0.004), 0.004, 0.004, -(l-0.004), -(l-0.004)]
    y_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
    z_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2]
    # Rotate into the world frame and translate: result (B, 8, 3).
    corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
    box_sample = corners_3d + np.expand_dims(center, -1)
    box_sample = box_sample.transpose(0, 2, 1)

    # Each edge is a ray: 4 corner origins reused for the edges running
    # along each of the 3 box axes (4 * 3 = 12 edges per box).
    ray_origins = np.array([box_sample[:, 0], box_sample[:, 2], box_sample[:, 5], box_sample[:, 7]]*3)
    x_axis = R[:, :, 0]
    y_axis = R[:, :, 1]
    z_axis = R[:, :, 2]
    ray_directions = np.array([-w*y_axis, w*y_axis, w*y_axis, -w*y_axis,  -l*x_axis, l*x_axis, -l*x_axis, l*x_axis, -h*z_axis, -h*z_axis, h*z_axis, h*z_axis])
    # Pair (start, end) points into (N, 2, 3) line segments.
    segments = np.hstack((ray_origins.reshape(-1, 3),
                          (ray_origins + ray_directions).reshape(-1, 3))).reshape(-1, 2, 3)
    return trimesh.load_path(segments)

# ---------------------------------------------------------------------------
# Script setup: load config, build the validation dataset/loader and model.
# ---------------------------------------------------------------------------
b = 1  # batch size; the visualisation loop below assumes one cloud per batch
from common.utils.config_ import cfg, cfg_from_yaml_file, merge_new_config_file
config = cfg_from_yaml_file('../experiments/base_config.yaml', cfg)
config = merge_new_config_file(config, '../experiments/local_msg_classification_config.yaml')
#config = merge_cfg_into_cfg('../experiments/local_msg_classification_config.yaml', '../experiments/base_config.yaml')
# With uniform sampling the dataset yields sample_rate-times more points so
# that furthest-point sampling in the loop can downsample back to num_points.
if not config.non_uniform_sampling:
    config.num_points *= config.sample_rate

val_dataset = Bindataset(
    dataset_dir='/home/v-wewei/code/two_stage_pointnet/mask_label/',
    num_points=int(config.num_points),
    transforms=transforms.Compose([
    d_utils.PointcloudToTensor(),
    d_utils.PointcloudJitter(std=0.0001, clip=0.0001),]),
    train=True,
    use_normal=config.use_normal,
    platform=config.platform
    )

val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=b, shuffle=False,
    drop_last=False, num_workers=16 if b > 1 else 0, pin_memory=True)

# NOTE(review): eval() instantiates the class named in the YAML config
# (e.g. 'PointNet2SemSegMSG') — safe only with trusted config files.
model = eval(config.model)(config)
#pretrained_dict = torch.load('{}'.format('/home/v-wewei/code/checkpoints/test_FocalLoss_018_0.pth.tar'), map_location='cpu')
# ---------------------------------------------------------------------------
# Evaluation / visualisation loop: for each checkpoint epoch, run the first
# stage over the validation set, decode per-point grasp predictions, feed the
# RoI head, and display ground-truth vs. predicted segmentation side by side.
# ---------------------------------------------------------------------------
for i in range(14, 15):
    # Load the epoch-i first-stage checkpoint (CPU map, then moved to GPU).
    pretrained_dict = torch.load('/home/v-wewei/code/two_stage_pointnet/checkpoints/test_SigmoidFocalLoss_{:03d}_0.pth.tar'.format(i), map_location='cpu')
    model.load_state_dict(pretrained_dict['state_dict'], strict=True)

    model.cuda()
    model.eval()
    # Second-stage RoI head; train mode so its target-assignment path runs.
    # NOTE(review): its weights are freshly initialised here — not loaded.
    model_roi = RoI_Head(input_channels=128, model_cfg=config.ROI_HEAD, num_class=1).cuda()
    model_roi.train()
    ratio = AverageMeter()   # running IoU of predicted vs. GT foreground sets
    recall = AverageMeter()  # running recall of GT foreground points
    with torch.no_grad():
        # NOTE(review): this `i` shadows the checkpoint index above; harmless
        # because the outer `i` is not used again after the torch.load call.
        for i, batch in enumerate(val_loader):
#            batch = val_loader[0]
            # label[0]: per-point class labels; label[2]: pose regression
            # targets — presumably (offset, angles, width); TODO confirm.
            data, label, dataset_idx = batch
            data = data.cuda(non_blocking=True)
            cls_label = label[0].cuda(non_blocking=True)
            pose_label = label[2].cuda(non_blocking=True)
            # Uniform-sampling path: FPS-downsample the oversampled cloud
            # (and its labels, via the same indices) back to num_points.
            if not config.non_uniform_sampling:
                idx = furthest_point_sample(data, int(config.num_points/config.sample_rate))
                data = gather_operation(
                    data.transpose(1, 2).contiguous(),
                    idx).transpose(1, 2).contiguous()
                cls_label = gather_operation(
                    cls_label.float().unsqueeze(dim=2).transpose(1, 2).contiguous(),
                    idx).squeeze().long().contiguous()
                pose_label = gather_operation(
                    pose_label.transpose(1, 2).contiguous(),
                    idx).transpose(1, 2).contiguous()
            #z_vec = z_vec.cuda(non_blocking=True)
            # Foreground = points with a positive class label.
            fg_mask = (cls_label.view(-1) > 0)
            fg_mask_ = (cls_label.squeeze()>0).long()

            # First stage: per-point segmentation logits + pose regression.
            cls_pred, pose_pred = model(data)
    #        output = output.sigmoid()
            #output = m(output)
            #res, pred = accuracy(output.data, cls, topk=(1,))
            data_ = data.detach().cpu().numpy().squeeze()
            print('data shape is : ', data_.shape)
            #left = np.where(output_ > 0.3)
            #
            #print(left)
            #print(cls.shape)
            #print(np.where(cls > 0.3))
            cls_label = cls_label.detach().cpu().numpy().squeeze()
            cls_pred = torch.sigmoid(cls_pred.squeeze())

            # Flatten (B, N, C) predictions/targets to (B*N, C) for decoding.
            B, N, C = pose_pred.size()
            pose_pred = pose_pred.view(B*N, C)
            _, _, C = pose_label.size()
            pose_label = pose_label.view(B*N, C)

            # Decode the binned regression output into a center offset,
            # gripper width, and rotation matrix per point.
            pose_pred, width, R = decode_bbox_target(
                pose_pred, config.loc_scope, config.loc_bin_size,
                azimuth_scope=config.azimuth_scope,
                azimuth_bin_size=config.azimuth_bin_size,
                elevation_scope=config.elevation_scope,
                elevation_bin_size=config.elevation_bin_size,
                width_scope=config.width_scope,
                width_bin_size=config.width_bin_size,
                grasp_angle_scope=config.grasp_angle_scope,
                grasp_angle_bin_size=config.grasp_angle_bin_size)
            # Rotation columns: approach / closing / orthogonal grasp axes.
            approach_vector = R[:, 0]
            closing_vector = R[:, 1]
            grasp_o_vector = R[:, 2]
            #save_feature = True
            #save_feature = False
            use_roi_head = True
            if use_roi_head:
                # Predicted centers are offsets — add the point coordinates
                # to get absolute positions, then pack the full grasp tensor.
                pose_pred = pose_pred.reshape(-1, config.num_points, 3)
                pose_pred += data
                width = width.reshape(-1, config.num_points, 1)
                approach_vector = approach_vector.reshape(-1, config.num_points, 3)
                closing_vector = closing_vector.reshape(-1, config.num_points, 3)
                grasp_o_vector = grasp_o_vector.reshape(-1, config.num_points, 3)
                grasp = torch.cat((pose_pred, approach_vector, closing_vector, width, grasp_o_vector), dim=-1)
               # box = box.cpu().numpy()
                print('grasp shape is :', grasp.shape)
                #cls_pred_ = np.expand_dims(cls_pred, axis=-1)
                #np.save('grasp.npy', box)
                #np.save('cls_pred.npy', cls_pred_)

                #pose_label = pose_label[fg_mask, :3]
                # Build ground-truth grasps for the foreground points: angles
                # -> direction vectors -> orthonormal rotation frame.
                approach_vector_label = angle_to_vector(pose_label[fg_mask, 3], pose_label[fg_mask, 4]).transpose(0, 1)
                closing_vector_label = grasp_angle_to_vector(pose_label[fg_mask, 7]).transpose(0, 1)
                #print('approach_vector_label shape is : ', approach_vector_label.shape)
                #print('closing_vector_label shape is : ', closing_vector_label.shape)
                R_ = rotation_from_vector(approach_vector_label, closing_vector_label)
                approach_vector_label = R_[:, 0].reshape(-1, 3)
                closing_vector_label = R_[:, 1].reshape(-1, 3)
                grasp_z_vector_label = R_[:, 2].reshape(-1, 3)
                width_label = pose_label[fg_mask, 6].unsqueeze(dim=-1)
                data = data.reshape(-1, 3)
                # GT centers are also offsets relative to their point.
                center = pose_label[fg_mask, :3] + data[fg_mask]
                #print(approach_vector_label.shape, closing_vector_label.shape, width_label.shape, center.shape)
                gt_grasp = torch.cat((center, approach_vector_label, closing_vector_label, width_label,grasp_z_vector_label), dim=-1)
                # NOTE(review): point features are loaded from a precomputed
                # file rather than taken from the model — confirm it matches
                # the current cloud.
                np_point_features = np.load('./point_features.npy')
                point_features = torch.from_numpy(np_point_features).transpose(1, 2).contiguous()
                batch_dict = {'batch_size':1, 'batch_cls_preds': cls_pred.unsqueeze(dim=0).unsqueeze(dim=-1),'batch_grasp_preds': grasp, 'gt_grasps': gt_grasp,'point_coords': data, 'point_features': point_features.cuda()}
                targets_dict = model_roi(batch_dict)
                show = True

                if show:
                    # Visualise the RoI head's selected proposals and the
                    # subset it sampled for training targets.
                    pose_pred = pose_pred.detach().cpu().numpy().squeeze()
                    R = R.detach().cpu().numpy()
                    point_cloud = trimesh.PointCloud(data_, colors=[0, 255, 0])
                    #print(pose_pred.shape)
                    #print(R.shape)
                    selected_box_inds = batch_dict['selected'].detach().cpu().numpy()
                    sampled_box_inds = targets_dict['sampled_inds'].detach().cpu().numpy()
                    path = draw_box(pose_pred[selected_box_inds], R[selected_box_inds])
                    scene = trimesh.Scene()
                    scene.add_geometry(path)
                    scene.add_geometry(point_cloud)
                    path_1 = draw_box(pose_pred[selected_box_inds][sampled_box_inds], R[selected_box_inds][sampled_box_inds])
                    scene_1 = trimesh.Scene()
                    scene_1.add_geometry(path_1)
                    scene_1.add_geometry(point_cloud)

                print(targets_dict.keys())
               # gt_box = gt_box.cpu().numpy()
                #np.save('gt_grasp.npy', gt_box)
                #np.save('fg_list.npy', fg_mask_.sum(dim=1).cpu().numpy())
            #exit()
            #R = rotation_from_vector(approach_vector, closing_vector)
            fg_mask = fg_mask.cpu().numpy()

            # Move everything needed for plotting to numpy on the CPU.
            approach_vector = approach_vector.cpu().numpy().squeeze()
            approach_vector_label = angle_to_vector(pose_label[fg_mask, 3], pose_label[fg_mask, 4]).transpose(0, 1).cpu().numpy().squeeze()
            closing_vector_label = grasp_angle_to_vector(pose_label[fg_mask, 7]).transpose(0, 1).cpu().numpy().squeeze()
            pose_label = pose_label.cpu().numpy().squeeze()
            pose_pred = pose_pred.cpu().numpy().squeeze()
            width = width.cpu().numpy().squeeze()
            closing_vector = closing_vector.cpu().numpy().squeeze()
            #z_vec = z_vec.cpu().numpy().squeeze()


#            print(pred.max())
#            exit()
#            print(pred.shape)


            cls_pred = cls_pred.detach().cpu().numpy().reshape(-1, int(config.num_points)).squeeze()
            # Colour masks: red = GT foreground, blue = predicted foreground.
            red = np.where(cls_label == 1)
            red_ = np.where(cls_label != 1)
            blue = np.where(cls_pred >= config.seg_thresh)
            blue_ = np.where(cls_pred < config.seg_thresh)
            # Turn relative centers into absolute positions for display.
            pose_label[fg_mask, :3] += data_[fg_mask]
            pose_pred[fg_mask] += data_[fg_mask]
            #print(data_.shape)
            #print(data_[red].shape)
            #print(data_[blue].shape)

            #pointcloud = trimesh.PointCloud(data_, colors=[0, 255, 0])
            #pointcloud.show()
            pointcloud_pred = trimesh.PointCloud(data_[blue], colors=[0, 0, 255])
            pointcloud_pred_ = trimesh.PointCloud(data_[blue_], colors=[0, 255, 0])
            pointcloud_true = trimesh.PointCloud(data_[red], colors=[255, 0, 0])
            pointcloud_true_ = trimesh.PointCloud(data_[red_], colors=[0, 255, 0])
            center = trimesh.PointCloud(pose_label[fg_mask, :3], colors=[255, 255, 0])
            center_ = trimesh.PointCloud(pose_pred[fg_mask], colors=[0, 255, 255])
            # Box wireframe for the first foreground grasp only.
            path = draw_box(pose_pred[fg_mask][[0]], R[fg_mask][[0]])
            # Short rays visualising predicted/GT approach & closing vectors.
            ray_origins = data_[red]
            ray_directions = approach_vector[fg_mask]
            #ray_directions = z_vec[fg_mask]
            ray_visualize_1 = trimesh.load_path(np.hstack((ray_origins, ray_origins+ray_directions / 50)).reshape(-1, 2, 3))
            #ray_directions = approach_vector[fg_mask]
            ray_directions = approach_vector_label
            ray_visualize_2 = trimesh.load_path(np.hstack((ray_origins, ray_origins+ray_directions / 50)).reshape(-1, 2, 3))
            print(closing_vector.shape)
            ray_directions = closing_vector[fg_mask]
            #ray_directions = closing_vector_label
            ray_visualize_3 = trimesh.load_path(np.hstack((ray_origins, ray_origins+ray_directions / 50)).reshape(-1, 2, 3))
            ray_visualize_1.colors=[[0, 0, 255]] * approach_vector[fg_mask].shape[0]
            ray_visualize_3.colors=[[255, 255, 0]] * approach_vector[fg_mask].shape[0]

            # Scene 1: ground-truth segmentation (overwrites the scene_1
            # built in the show-branch above).  Scene 2: predictions.
            scene_1 = trimesh.Scene()
            scene_1.add_geometry(pointcloud_true)
            scene_1.add_geometry(pointcloud_true_)
            #scene_1.add_geometry(center)
            #scene_1.add_geometry(center_)
            #scene_1.add_geometry(path)
            #scene_1.add_geometry(ray_visualize_3)
            #scene_1.show()
           # scene_1.add_geometry(ray_visualize_1)
           # scene_1.add_geometry(ray_visualize_2)

            #scene_1.add_geometry(ray_visualize_2)
            #scene.add_geometry(pointcloud_pred)
            scene_2 = trimesh.Scene()
            scene_2.add_geometry(pointcloud_pred)
            scene_2.add_geometry(pointcloud_pred_)
            # Blocks until the viewer window is closed ('q').
            Application(scene_1, scene_2)

            # Per-cloud IoU and recall between predicted and GT foreground
            # point index sets.
            for pred_, cls_ in zip(cls_pred, cls_label):
                area_1 = np.where(pred_ >= config.seg_thresh)
                area_2 = np.where(cls_ == 1)
                #print(area_1[0])
                #set_a = set()
                #[set_a.add(i) for i in area_1[0]]
                #print(set_a)

                area_1 = set(area_1[0])
                area_2 = set(area_2[0])
                U = set.union(area_1, area_2)
                I = set.intersection(area_1, area_2)
                if len(U) == 0:
                    ratio_ = 0
                else:
                    ratio_ = len(I) / len(U)
                if len(area_2) ==0:
                    ratio_recall = 0
                else:
                    ratio_recall = len(I) / len(area_2)
                ratio.update(ratio_, 1)
                recall.update(ratio_recall, 1)
        print('IOU is : ', ratio.avg)
        print('Recall is : ', recall.avg)
    
