import torch
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import sys
import transforms3d as t3d
import open3d as o3d

sys.path.append('../')
import dataset.data_utils as d_utils
from dataset.BindatasetLoader import Bindataset
from dataset.SuctionBindatasetLoader import SuctionBindataset
from models.two_stage_graspnet import Two_Stage_GraspNet
from models.pointnet2_msg_sem import PointNet2SemSegMSG
from models.roi_head import RoI_Head
# from common.utils.config import merge_cfg_into_cfg
from common.lib.metric import AverageMeter
from common.lib.bbox import decode_bbox_target, angle_to_vector, grasp_angle_to_vector, rotation_from_vector
from utils.pointnet2_utils import furthest_point_sample, gather_operation
import trimesh
import yaml
import os
import trimesh.viewer
import pyglet
import glooey
import torch.backends.cudnn as cudnn
import random
import torch.nn.functional as F

import math

# Reproducibility / device setup: pin the process to GPU 0 and seed every
# RNG source (Python, NumPy, torch CPU and CUDA) with the same value.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
cudnn.benchmark = False
# FIX: was `cudnn.determinstic` (typo) — that only created a meaningless
# attribute on the module and never enabled deterministic cuDNN kernels.
cudnn.deterministic = True


class Application:
    """
    Side-by-side viewer showing two trimesh scenes in a single pyglet window.

    The left pane shows ``scene_1`` and the right pane shows ``scene_2``;
    pressing ``Q`` with no modifier keys closes the window. Construction
    blocks until the window is closed (it calls ``pyglet.app.run()``).
    """

    def __init__(self, scene_1, scene_2):
        # Two panes of 920 px each side by side, 960 px tall.
        self.width, self.height = 920 * 2, 960
        win = self._create_window(width=self.width, height=self.height)

        gui = glooey.Gui(win)

        row = glooey.HBox()
        row.set_padding(5)

        # Left pane: interactive camera for the first scene.
        self.scene_widget1 = trimesh.viewer.SceneWidget(scene_1)
        # Right pane: second scene.
        self.scene_widget2 = trimesh.viewer.SceneWidget(scene_2)
        for widget in (self.scene_widget1, self.scene_widget2):
            row.add(widget)

        gui.add(row)

        # Enters the pyglet event loop; returns when the window closes.
        pyglet.app.run()

    def _create_window(self, width, height):
        """Create a pyglet window, preferring a 4x-multisampled GL config."""
        try:
            gl_config = pyglet.gl.Config(sample_buffers=1,
                                         samples=4,
                                         depth_size=24,
                                         double_buffer=True)
            win = pyglet.window.Window(config=gl_config,
                                       width=width,
                                       height=height)
        except pyglet.window.NoSuchConfigException:
            # Antialiased config unavailable — fall back to plain double buffering.
            gl_config = pyglet.gl.Config(double_buffer=True)
            win = pyglet.window.Window(config=gl_config,
                                       width=width,
                                       height=height)

        @win.event
        def on_key_press(symbol, modifiers):
            # Close only on a bare 'Q' press (no modifier keys held).
            if modifiers == 0 and symbol == pyglet.window.key.Q:
                win.close()

        return win




# Batch size for the DataLoader below (1 => per-sample visualization).
b = 1
mode = 'validate'
# mode = 'train'
# NOTE(review): mid-file import — convention would place this with the
# imports at the top of the file.
from common.utils.config_ import cfg, cfg_from_yaml_file, merge_new_config_file

# Load the base config, then overlay the experiment-specific config on top.
config = cfg_from_yaml_file('../experiments/base_config.yaml', cfg)
config = merge_new_config_file(config, '../experiments/lifuyu_config.yaml')
# config = merge_cfg_into_cfg('../experiments/local_msg_classification_config.yaml', '../experiments/base_config.yaml')
# With uniform sampling enabled, oversample by `sample_rate`; the evaluation
# loop later downsamples back to `num_points / sample_rate` via FPS.
if not config.non_uniform_sampling:
    config.num_points *= config.sample_rate
val_dataset = SuctionBindataset(
    dataset_dir=config.test_dataset_dir,
    # dataset_dir='/home/v-wewei/VV_grasp_dataset/',
    num_points=int(config.num_points),
    transforms=transforms.Compose([
        d_utils.PointcloudToTensor(),
        # d_utils.PointcloudJitter(std=0.0001, clip=0.0001),
    ]),
    mode=mode,
    use_normal=config.use_normal,
    platform=config.platform,
    # non_uniform_sampling=False,
    aug_scene=False
)

# shuffle=False / drop_last=False so every validation sample is visited once,
# in order; workers only help when batching.
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=b, shuffle=False,
                                         drop_last=False, num_workers=11 if b > 1 else 0, pin_memory=True)

# SECURITY: eval() on a config-supplied string executes arbitrary code — only
# use trusted config files (an explicit name -> class mapping would be safer).
model = eval(config.model)(config)

# Checkpoint evaluation loop: for each saved checkpoint, run the model over
# the validation set, report how many predicted suction normals fall within
# 10 degrees of the labels, and show labels vs. predictions side by side.
#
# Cleanups vs. the previous revision: removed unused locals (`flag`, `ratio`,
# `recall`, `objectness_pred`, and the never-displayed `pointcloud_red` /
# `pointcloud_blue` clouds) and the no-op `if True:` nesting level.
for ckpt_idx in range(25, 26):  # renamed from `i`, which the batch loop shadowed
    pretrained_dict = torch.load(
        '/home/v-wewei/code/two_stage_pointnet/suction_checkpoints/test_two_stage_{:03d}_0.pth.tar'.format(ckpt_idx),
        map_location='cpu')
    model.load_state_dict(pretrained_dict['state_dict'], strict=True)

    model.cuda()
    model.eval()
    with torch.no_grad():
        for batch_idx, batch in enumerate(val_loader):
            data, label, dataset_idx = batch
            print('dataset_idx is : ', dataset_idx)
            data = data.cuda(non_blocking=True)
            # Label layout (inferred from usage): [cls, score, pose, objectness].
            cls_label = label[0].cuda(non_blocking=True)
            score_label = label[1].cuda(non_blocking=True)
            pose_label = label[2].cuda(non_blocking=True)
            objectness_label = label[3].cuda(non_blocking=True)
            if config.train_two_stage:
                # NOTE(review): label[3] is already objectness_label above —
                # confirm this should not be label[4].
                matrix_label = label[3].cuda(non_blocking=True)

            if not config.non_uniform_sampling:
                print('non uniform_sampling')
                # Downsample the oversampled cloud back to the nominal size
                # with furthest-point sampling, keeping labels aligned.
                idx = furthest_point_sample(data, int(config.num_points / config.sample_rate))
                data = gather_operation(
                    data.transpose(1, 2).contiguous(),
                    idx).transpose(1, 2).contiguous()
                cls_label = gather_operation(
                    cls_label.float().unsqueeze(dim=2).transpose(1, 2).contiguous(),
                    idx).squeeze().long().contiguous()
                pose_label = gather_operation(
                    pose_label.transpose(1, 2).contiguous(),
                    idx).transpose(1, 2).contiguous()

            fg_mask = (cls_label.view(-1) > 0)
            fg_mask_ = (cls_label.squeeze() > 0).long()
            if config.train_two_stage:
                kwargs = {'cls_label': cls_label, 'score_label': score_label, 'pose_label': pose_label,
                          'matrix_label': matrix_label, 'mode': 'validate'}
            else:
                kwargs = {'cls_label': cls_label, 'score_label': score_label, 'pose_label': pose_label,
                          'objectness_label': objectness_label, 'mode': 'validate'}

            batch_dict = model(data, **kwargs)

            # Bring everything back to numpy for analysis/visualization
            # (batch size is 1, hence the squeeze()).
            data_ = data.detach().cpu().numpy().squeeze()
            cls_label = cls_label.detach().cpu().numpy().squeeze()
            cls_pred = batch_dict['batch_cls_preds'].detach().cpu().numpy().squeeze()
            score_pred = batch_dict['batch_score_preds'].detach().cpu().numpy().squeeze()
            pose_pred = batch_dict['batch_suction_pose_preds']
            # Normalize predicted normals to unit length before comparing angles.
            pose_pred = F.normalize(pose_pred, dim=2)
            pose_pred = pose_pred.detach().cpu().numpy().squeeze()
            pose_label = pose_label.detach().cpu().numpy().squeeze()
            # Points whose predicted suction score clears the (hard-coded) threshold.
            score_mask = np.where(score_pred > 0.85)

            show = True
            if show:
                # NOTE(review): `red` etc. are only defined for train/validate;
                # `mode` is fixed to 'validate' above so the uses below are safe.
                if mode in ['train', 'validate']:
                    red = np.where(cls_label == 1)                 # labeled positives
                    red_ = np.where(cls_label != 1)                # labeled negatives
                    red_score = np.intersect1d(red, score_mask)    # positives w/ high score
                    others_points = np.setdiff1d(red, red_score)   # positives w/ low score
                    pointcloud_red_ = trimesh.PointCloud(data_[red_], colors=[0, 255, 0])
                    pointcloud_red_score = trimesh.PointCloud(data_[red_score], colors=[255, 255, 0])
                    pointcloud_others = trimesh.PointCloud(data_[others_points], colors=[255, 0, 0])

                scene1 = trimesh.Scene()
                scene1.add_geometry(pointcloud_red_score)
                scene1.add_geometry(pointcloud_others)
                scene1.add_geometry(pointcloud_red_)

                # Angle between labeled and predicted normals on labeled positives.
                result = np.sum(pose_label[red] * pose_pred[red], axis=1)
                # FIX: clip to [-1, 1] — float error can push the dot product of
                # two unit vectors slightly past 1, making arccos return NaN.
                angle = np.arccos(np.clip(result, -1.0, 1.0)) / np.pi * 180
                threshold_angle = 10
                index = np.where(angle > threshold_angle)
                # FIX: reshape(-1) instead of squeeze() — squeeze() produces a
                # 0-d array when exactly one point exceeds the threshold, which
                # breaks `index.shape[0]` and the fancy indexing below.
                index = np.asarray(index).reshape(-1)
                total = angle.shape[0]
                print('has {} total suction normals'.format(total))
                print('{} suction normals < 10 degree '.format(total - index.shape[0]))
                # max(total, 1) guards against division by zero on a batch
                # with no labeled positives (prints 0.0% instead of crashing).
                print('{}% suction normals < 10 degree'.format((total - index.shape[0]) / max(total, 1) * 100))

                # Rays for points deviating more than the threshold:
                # label normals in red, predicted normals in yellow.
                ray_origins = data_[red][index]
                ray_directions = pose_label[red][index]
                vis_path = np.hstack((ray_origins, ray_origins + ray_directions / 100)).reshape(-1, 2, 3)
                ray_visualize = trimesh.load_path(vis_path)

                ray_directions_ = pose_pred[red][index]
                vis_path_pred = np.hstack((ray_origins, ray_origins + ray_directions_ / 100)).reshape(-1, 2, 3)
                ray_visualize_pred = trimesh.load_path(vis_path_pred)

                colors = np.ones((len(ray_visualize.entities), 3))
                colors2 = np.ones((len(ray_visualize_pred.entities), 3))
                colors_1 = (colors * [255, 0, 0]).astype(np.uint8)      # label rays: red
                colors_2 = (colors2 * [255, 255, 51]).astype(np.uint8)  # predicted rays: yellow
                ray_visualize.colors = colors_1
                ray_visualize_pred.colors = colors_2
                print('show normal')
                scene1.add_geometry(ray_visualize)
                scene1.add_geometry(ray_visualize_pred)

                # Second scene: same breakdown but from the segmentation
                # prediction instead of the ground-truth labels.
                blue = np.where(cls_pred >= config.seg_thresh + 0.1)
                blue_ = np.where(cls_pred < config.seg_thresh + 0.1)
                blue_score = np.intersect1d(blue, score_mask)
                blue_others_points = np.setdiff1d(blue, blue_score)
                pointcloud_blue_score = trimesh.PointCloud(data_[blue_score], colors=[255, 255, 0])
                pointcloud_blue_ = trimesh.PointCloud(data_[blue_], colors=[0, 255, 0])
                pointcloud_blue_others = trimesh.PointCloud(data_[blue_others_points], colors=[0, 0, 255])

                ray_origins_blue = data_[blue]
                ray_directions_blue = pose_pred[blue]
                vis_path_pred_blue = np.hstack((ray_origins_blue, ray_origins_blue + ray_directions_blue / 100)).reshape(-1, 2, 3)
                ray_visualize_pred_blue = trimesh.load_path(vis_path_pred_blue)

                scene2 = trimesh.Scene()
                scene2.add_geometry(pointcloud_blue_)
                scene2.add_geometry(pointcloud_blue_score)
                scene2.add_geometry(pointcloud_blue_others)
                scene2.add_geometry(ray_visualize_pred_blue)

                # Blocks until the viewer window is closed.
                Application(scene1, scene2)






