import torch
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import sys

sys.path.append('../')
import dataset.data_utils as d_utils
from dataset.BindatasetLoader import Bindataset
from dataset.SuctionBindatasetLoader import SuctionBindataset
from dataset.ApproachBindatasetLoader import ApproachBindataset
from dataset.Grasp_SuctionBindatasetLoader import Grasp_SuctionBindataset
from models.two_stage_graspnet import Two_Stage_GraspNet
from models.grasp_suction_net import Two_Stage_GraspNet_Combine
# from models.pointnet2_msg_sem import PointNet2SemSegMSG
from models.pointnet2_msg_sem_grasp_suction import PointNet2SemSegMSG
from models.roi_head import RoI_Head
# from common.utils.config import merge_cfg_into_cfg
from common.lib.metric import AverageMeter
from common.lib.bbox import decode_bbox_target, angle_to_vector, grasp_angle_to_vector, rotation_from_vector
from utils.pointnet2_utils import furthest_point_sample, gather_operation
import trimesh
import yaml
import os
import trimesh.viewer
import pyglet
import glooey
import torch.backends.cudnn as cudnn
import random
import torch.nn.functional as F

import math

# Pin the process to GPU 0 and seed every RNG so repeated visualization
# runs see identical sampling and dataloader order.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
cudnn.benchmark = False
# BUG FIX: was `cudnn.determinstic` (typo). Assigning a misspelled name
# silently creates a new module attribute, so cuDNN was never actually
# put into deterministic mode.
cudnn.deterministic = True


class Application:
    """Side-by-side viewer: renders two trimesh scenes in one pyglet window.

    Constructing an instance opens the window and blocks inside the pyglet
    event loop until the window is closed (press 'q').
    """

    def __init__(self, scene_1, scene_2):
        # Window large enough for two 920px-wide scene widgets.
        self.width, self.height = 920 * 2, 960
        win = self._create_window(width=self.width, height=self.height)

        gui = glooey.Gui(win)

        row = glooey.HBox()
        row.set_padding(5)

        # Left widget: first scene (camera can be moved independently).
        self.scene_widget1 = trimesh.viewer.SceneWidget(scene_1)
        # Right widget: second scene.
        self.scene_widget2 = trimesh.viewer.SceneWidget(scene_2)
        row.add(self.scene_widget1)
        row.add(self.scene_widget2)

        gui.add(row)

        pyglet.app.run()  # blocks until the window closes

    def _create_window(self, width, height):
        """Create a pyglet window, preferring an antialiased GL config."""
        try:
            gl_config = pyglet.gl.Config(sample_buffers=1,
                                         samples=4,
                                         depth_size=24,
                                         double_buffer=True)
            win = pyglet.window.Window(config=gl_config,
                                       width=width,
                                       height=height)
        except pyglet.window.NoSuchConfigException:
            # Fall back to a plain double-buffered context when MSAA
            # is not supported by the driver.
            gl_config = pyglet.gl.Config(double_buffer=True)
            win = pyglet.window.Window(config=gl_config,
                                       width=width,
                                       height=height)

        @win.event
        def on_key_press(symbol, modifiers):
            # A plain 'q' press (no modifier keys) closes the window.
            if modifiers == 0 and symbol == pyglet.window.key.Q:
                win.close()

        return win



def draw_box(center, R, l=0.041, w=0.045, h=0.012):
    """Return a trimesh wireframe path outlining a gripper box.

    The box extends 4 mm past the origin along the local +x axis and
    ``l - 4 mm`` behind it, is ``w`` wide (y) and ``h`` tall (z), then is
    rotated by ``R`` (3x3) and translated to ``center`` (3,).
    """
    front = 0.004
    back = -(l - 0.004)
    half_w = w / 2
    half_h = h / 2

    # Eight box corners in the gripper frame, one column per corner.
    local = np.array([
        [front, front, back, back, front, front, back, back],
        [half_w, -half_w, -half_w, half_w, half_w, -half_w, -half_w, half_w],
        [half_h, half_h, half_h, half_h, -half_h, -half_h, -half_h, -half_h],
    ])

    # Transform into the world frame: rotate, then translate to center.
    world = np.dot(R, local) + np.expand_dims(center, -1)

    # Twelve edges drawn as rays: the same four corner origins are reused
    # for the y-, x- and z-direction edge groups.
    origins = np.array([world[:, 0], world[:, 2], world[:, 5], world[:, 7]] * 3)

    ax = R[:, 0]
    ay = R[:, 1]
    az = R[:, 2]
    directions = np.array([
        -w * ay, w * ay, w * ay, -w * ay,
        -l * ax, l * ax, -l * ax, l * ax,
        -h * az, -h * az, h * az, h * az,
    ])

    segments = np.hstack((origins.reshape(-1, 3),
                          (origins + directions).reshape(-1, 3))).reshape(-1, 2, 3)
    return trimesh.load_path(segments)

def change_ray_color(ray, color):
    """Paint every entity of a trimesh path object with one RGB color.

    ``color`` is an RGB triple (0-255); the result is stored on
    ``ray.colors`` as a uint8 array of shape (n_entities, 3).
    """
    n_entities = len(ray.entities)
    tinted = np.broadcast_to(np.asarray(color, dtype=float), (n_entities, 3))
    ray.colors = tinted.astype(np.uint8)

if __name__ == "__main__":
    # Qualitative evaluation / visualization of the combined grasp+suction
    # network: loads a pretrained checkpoint, runs the validation set one
    # scene at a time, and renders predicted vs. ground-truth segmentation
    # masks, normals, and 25 predicted grasp boxes per scene.

    # Re-seed (overriding the module-level seed of 1) for reproducible
    # dataloader order and sampling within this script.
    np.random.seed(0)
    random.seed(0)
    torch.manual_seed(0)
    b = 1  # batch size; the visualization below assumes a single scene
    mode = 'validate'
    from common.utils.config_ import cfg, cfg_from_yaml_file, merge_new_config_file

    # Base config overlaid with the grasp+suction-combine experiment config.
    config = cfg_from_yaml_file('../experiments/base_config.yaml', cfg)
    config = merge_new_config_file(config, '../experiments/grasp_suction_combine_config.yaml')

    val_dataset = Grasp_SuctionBindataset(
        dataset_dir=config.test_dataset_dir,
        num_points=int(config.num_points),
        transforms=transforms.Compose([
            d_utils.PointcloudToTensor(),
        ]),
        mode=mode,
        use_normal=config.use_normal,
        platform=config.platform,
        aug_scene=False,
        non_uniform_sampling=True,
    )

    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=b, shuffle=False,
                                             drop_last=False, num_workers=11 if b > 1 else 0, pin_memory=True)

    # NOTE(review): eval() on a config-supplied string instantiates the model
    # class by name; only safe with trusted config files.
    model = eval(config.model)(config)


    # Hard-coded checkpoint path (epoch 15); adjust for other machines.
    pretrained_dict = torch.load(
        '/home/v-wewei/code/two_stage_pointnet/suction_checkpoints/test_two_stage_{:03d}_0.pth.tar'.format(15),
        map_location='cpu')
    model.load_state_dict(pretrained_dict['state_dict'], strict=True)

    model.cuda()
    model.eval()
    ratio = AverageMeter()
    recall = AverageMeter()
    with torch.no_grad():
        flag = 0
        for i, batch in enumerate(val_loader):
            #            batch = val_loader[0]
            if True:
                # if not flag == 72:
                #     flag += 1
                #     continue
                data, label, dataset_idx = batch
                print('dataset_idx is : ', dataset_idx)

                data = data.cuda(non_blocking=True)
                # Unpack the 9-element label tuple onto the GPU.
                # NOTE(review): labels (and later `kwargs`) are only bound when
                # these config flags are set; with them off, the model call
                # below would raise NameError on `kwargs`.
                if config.train_grasp:
                    if config.train_grasp_approach:
                        grasp_cls_label = label[0].cuda(non_blocking=True)
                        grasp_score_label = label[1].cuda(non_blocking=True)
                        grasp_pose_label = label[2].cuda(non_blocking=True)
                        matrix_label = label[3].cuda(non_blocking=True)
                        distance_label = label[4].cuda(non_blocking=True)
                        normals_label = label[5].cuda(non_blocking=True)
                        suction_cls_label = label[6].cuda(non_blocking=True)
                        suction_score_label = label[7].cuda(non_blocking=True)
                        objectness_label = label[8].cuda(non_blocking=True)

                if config.train_grasp:

                    if config.train_grasp_approach and config.train_suction_combine:
                        # NOTE(review): mode is 'train' even though this is an
                        # eval script — presumably so the model returns the
                        # label-aligned prediction tensors; confirm intended.
                        kwargs = {'grasp_cls_label': grasp_cls_label, 'grasp_score_label': grasp_score_label,
                                  'grasp_pose_label': grasp_pose_label,
                                  'matrix_label': matrix_label, 'distance_label': distance_label,
                                  'normals_label': normals_label,
                                  'suction_cls_label': suction_cls_label,
                                  'suction_score_label': suction_score_label,
                                  'objectness_label': objectness_label, 'mode': 'train'}

                batch_dict = model(data, **kwargs)

                # Pull everything back to numpy; squeeze drops the batch dim
                # (b == 1 assumed throughout).
                data_ = data.detach().cpu().numpy().squeeze()

                grasp_cls_label = grasp_cls_label.detach().cpu().numpy().squeeze()

                grasp_cls_pred = batch_dict['batch_grasp_cls_preds'].detach().cpu().numpy().squeeze()

                suction_cls_label = suction_cls_label.detach().cpu().numpy().squeeze()

                suction_cls_pred = batch_dict['batch_suction_cls_preds'].detach().cpu().numpy().squeeze()

                grasp_score_pred = batch_dict['batch_grasp_score_preds'].detach().cpu().numpy().squeeze()

                suction_score_pred = batch_dict['batch_suction_score_preds'].detach().cpu().numpy().squeeze()

                grasp_pose_pred = batch_dict['batch_grasp_preds']

                normals_pred = batch_dict['batch_normals_preds']

                # Unit-normalize predicted and label normals before comparing.
                normals_pred = F.normalize(normals_pred,dim=2)

                normals_pred = normals_pred.detach().cpu().numpy().squeeze()

                normals_label = F.normalize(normals_label, dim=2)
                normals_label = normals_label.detach().cpu().numpy().squeeze()

                # Points whose predicted grasp quality exceeds 0.5.
                score_mask  = np.where(grasp_score_pred > 0.5)

                distance_pred = batch_dict['batch_distance_preds'].detach().cpu().numpy().squeeze()

                distance_label=distance_label.detach().cpu().numpy().squeeze()

                grasp_pose_label = grasp_pose_label.cpu().numpy().squeeze()

                matrix_label = matrix_label.cpu().numpy().squeeze()

                objectness_pred  = batch_dict['batch_objectness_preds'].detach().cpu().numpy().squeeze()
                objectness_label = objectness_label.detach().cpu().numpy().squeeze()

                # Per-point boolean masks: graspable-only, suctionable-only,
                # both, neither (XOR against the intersection removes it from
                # each single-modality mask).
                suction_cls_pred_mask = suction_cls_pred > 0.4
                grasp_cls_pred_mask = grasp_cls_pred > 0.3
                grasp_suction_all = suction_cls_pred_mask & grasp_cls_pred_mask
                suction_only = suction_cls_pred_mask ^ grasp_suction_all
                grasp_only = grasp_cls_pred_mask ^ grasp_suction_all
                postive_mask = suction_cls_pred_mask | grasp_cls_pred_mask
                negtive_mask = ~postive_mask
                objectness_pred_mask = objectness_pred > 0.5
                ground_mask = ~objectness_pred_mask
                # Objectness visualization: predicted (scene) vs label (scene1);
                # blue = object points, green = background/ground.
                pointcloud_blue = trimesh.PointCloud(data_[objectness_pred_mask], colors=[0, 0, 255])
                pointcloud_blue_ = trimesh.PointCloud(data_[ground_mask], colors=[0, 255, 0])
                pointcloud_blue_label = trimesh.PointCloud(data_[objectness_label==1], colors=[0, 0, 255])
                pointcloud_blue_label_ = trimesh.PointCloud(data_[objectness_label==0], colors=[0, 255, 0])
                scene = trimesh.Scene([pointcloud_blue, pointcloud_blue_])
                scene1 = trimesh.Scene([pointcloud_blue_label, pointcloud_blue_label_])
                # Application(scene, scene1)


                # index_suction_cls = [i for i in range(len(suction_cls_pred_mask)) if suction_cls_pred_mask[i]]
                # index_grasp_cls = [j for j in range(len(grasp_cls_pred_mask)) if grasp_cls_pred_mask[j]]
                # index_grasp_suction_cls = [k for k in range(len(grasp_suction_all)) if grasp_suction_all[k]]
                # print(index_grasp_suction_cls)
                # print(index_grasp_cls)
                # print(index_suction_cls)
                # score_grasp_value_ind = list((item, index_grasp_cls[index], 0) for index, item in enumerate(grasp_score_pred[grasp_cls_pred_mask]))
                # score_suction_value_ind = list((item, index_suction_cls[index], 1) for index, item in enumerate(suction_score_pred[suction_cls_pred_mask]))
                # candidates = score_grasp_value_ind + score_suction_value_ind
                # result = sorted(candidates, reverse=True)
                # print('candidates', score_grasp_value_ind)
                # print('candidates' , score_suction_value_ind)
                # print('***********************************************')
                # print('result',result)


                # Predicted-mask scene: blue = grasp+suction, orange = suction
                # only, magenta = grasp only, green = negative.
                if not True in grasp_suction_all:
                    print('NO grasp_suction_all')
                else:
                    pointcloud_blue = trimesh.PointCloud(data_[grasp_suction_all], colors=[0, 0, 255])
                pointcloud_suction_only = trimesh.PointCloud(data_[suction_only],colors=[255,165,0])
                pointcloud_grasp_only = trimesh.PointCloud(data_[grasp_only],colors=[255,0,255])
                pointcloud_blue_ = trimesh.PointCloud(data_[negtive_mask], colors=[0, 255, 0])
                if not True in grasp_suction_all:
                    scene = trimesh.Scene(
                        [pointcloud_blue_, pointcloud_suction_only, pointcloud_grasp_only])

                else:
                    scene = trimesh.Scene(
                        [pointcloud_blue, pointcloud_blue_, pointcloud_suction_only, pointcloud_grasp_only])
                # scene.show()


                # Ground-truth counterpart of the masks above, same color code.
                a_mask = suction_cls_label == 1
                b_mask = grasp_cls_label == 1
                all_mask = a_mask & b_mask
                suction_only_label = a_mask ^ all_mask
                grasp_only_label = b_mask ^ all_mask
                postive_mask_label = a_mask | b_mask
                negtive_mask_label = ~postive_mask_label

                if not True in all_mask:
                    print('NO grasp_suction_all')
                else:
                    pointcloud_blue = trimesh.PointCloud(data_[all_mask], colors=[0, 0, 255])
                pointcloud_suction_only_label = trimesh.PointCloud(data_[suction_only_label],colors=[255,165,0])
                pointcloud_grasp_only_label = trimesh.PointCloud(data_[grasp_only_label],colors=[255,0,255])
                pointcloud_negtive_label = trimesh.PointCloud(data_[negtive_mask_label], colors=[0, 255, 0])
                if not True in all_mask:
                    scene1 = trimesh.Scene(
                        [pointcloud_suction_only_label, pointcloud_grasp_only_label, pointcloud_negtive_label])

                else:
                    scene1 = trimesh.Scene(
                        [pointcloud_blue, pointcloud_suction_only_label, pointcloud_grasp_only_label, pointcloud_negtive_label])
                # Application(scene, scene1)



                # exit()
                #
                show = True

                if show:
                    # Grasp-classification view: red = positive-label points.
                    if mode in ['train', 'validate']:
                        red = np.where(grasp_cls_label == 1)
                        red_ = np.where(grasp_cls_label != 1)
                        pointcloud_red = trimesh.PointCloud(data_[red], colors=[255, 0, 0])
                        pointcloud_red_ = trimesh.PointCloud(data_[red_], colors=[0, 255, 0])


                    scene1 = trimesh.Scene()
                    scene1.add_geometry(pointcloud_red)
                    scene1.add_geometry(pointcloud_red_)

                    # Normal rays at label-positive points, scaled by 1/100:
                    # red = label normals, yellow = predicted normals.
                    ray_origins = data_[red]
                    ray_directions = normals_label[red]
                    vis_path = np.hstack((ray_origins, ray_origins+ray_directions/100)).reshape(-1, 2, 3)
                    ray_visualize = trimesh.load_path(vis_path)

                    ray_directions_ = normals_pred[red]
                    vis_path_pred = np.hstack((ray_origins, ray_origins+ray_directions_/100)).reshape(-1, 2, 3)
                    ray_visualize_pred = trimesh.load_path(vis_path_pred)

                    colors = np.ones((len(ray_visualize.entities), 3))
                    colors2 = np.ones((len(ray_visualize_pred.entities), 3))
                    colors_1 = (colors * [255, 0, 0]).astype(np.uint8)   #label_vis
                    colors_2 = (colors2 * [255,255, 51]).astype(np.uint8) #pred_vis
                    ray_visualize.colors = colors_1
                    ray_visualize_pred.colors = colors_2
                    # print('show normal')
                    # scene1.add_geometry(ray_visualize)
                    # scene1.add_geometry(ray_visualize_pred)

                    # Blue = confidently-predicted grasp points (margin of 0.1
                    # above the configured segmentation threshold).
                    blue = np.where(grasp_cls_pred >= config.seg_thresh+0.1)
                    blue_ = np.where(grasp_cls_pred < config.seg_thresh+0.1)

                    pointcloud_blue = trimesh.PointCloud(data_[blue], colors=[0, 0, 255])

                    pointcloud_blue_ = trimesh.PointCloud(data_[blue_], colors=[0, 255, 0])

                    ray_origins_blue = data_[blue]

                    ray_directions_blue = normals_pred[blue]
                    vis_path_pred_blue_score = np.hstack((ray_origins_blue, ray_origins_blue+ray_directions_blue/100)).reshape(-1, 2, 3)
                    ray_visualize_pred_blue_score = trimesh.load_path(vis_path_pred_blue_score)
                    # print(distance_pred[blue])
                    scene2 = trimesh.Scene()

                    # Points that are both label-positive and confidently
                    # predicted — grasp boxes are drawn at these.
                    red_blue = np.intersect1d(red, blue)
                    print('red_blue',type(red_blue),red_blue.shape)

                    red_score = np.intersect1d(red, score_mask)
                    blue_score = np.intersect1d(blue, score_mask)

                    red_blue_score = np.intersect1d(red_score, blue_score)


                    # Draw the first 25 predicted grasps alongside their labels.
                    # NOTE(review): this reuses `i`, shadowing the outer batch
                    # index; and it indexes red_blue[i] without checking that
                    # red_blue has at least 25 entries — possible IndexError.
                    for i in range(25):
                        if red_blue.shape[0] == 0:
                            print('NO grasp points')
                            break
                        else:
                            pass
                        # Columns 3..8 and 10..12 of the pose hold a flattened
                        # rotation; transpose(-1,-2) converts row- to
                        # column-vector convention — TODO confirm layout.
                        R = grasp_pose_pred[:, :, [3, 4, 5, 6, 7, 8, 10, 11, 12]].view(-1, 3, 3).transpose(-1,
                                                                                                     -2).detach().cpu().numpy()[red_blue][i]


                        R_label = matrix_label[red_blue][i]

                        # Column 9 = predicted gripper width; last column =
                        # predicted offset distance along the approach axis.
                        width = grasp_pose_pred[:, :, 9].view(-1).detach().cpu().numpy()[red_blue][i]
                        # ori_width = grasp_pose_pred[:, :, -2].view(-1).detach().cpu().numpy()[red_blue][i]
                        off_d = grasp_pose_pred[:, :, -1].view(-1).detach().cpu().numpy()[red_blue][i]
                        off_d_label = grasp_pose_label[:,-1][red_blue][i]
                        print('***********offset_distance***************',off_d, 'label is ',off_d_label)
                        approach_vector = grasp_pose_pred[:, :,[3,4,5]].view(-1,3).detach().cpu().numpy()[red_blue][i]

                        close_vector = grasp_pose_pred[:, :, [6,7,8]].view(-1,3).detach().cpu().numpy()[red_blue][i]

                        center = grasp_pose_pred[:, :, :3].view(-1,3).detach().cpu().numpy()[red_blue][i]

                        # center = data_[red_blue][i] + distance_pred[red_blue][i] * approach_vector

                        # Label grasp center is reconstructed from the surface
                        # point plus label distance along the label approach axis.
                        approach_vector_label = matrix_label[:,:,0].reshape(-1,3)

                        center_label = data_[red_blue][i] + distance_label[red_blue][i] * approach_vector_label[red_blue][i]
                        close_vector_label = matrix_label[:,:,1].reshape(-1,3)[red_blue][i]

                        pc_center = trimesh.PointCloud(center.reshape(1,3),colors=[255,110,0])
                        pc_center_label = trimesh.PointCloud(center_label.reshape(1, 3), colors=[255,0,255])

                        # Approach-axis rays (scaled 1/50): blue = prediction,
                        # magenta = label.
                        vis_appoach_vector = np.hstack((center, center + approach_vector/50)).reshape(-1, 2, 3)
                        vis_appoach_vector_label = np.hstack((center_label, center_label + approach_vector_label[red_blue][i] / 50)).reshape(-1, 2, 3)

                        ray_visualize_appoach_v = trimesh.load_path(vis_appoach_vector)
                        ray_visualize_appoach_v_label = trimesh.load_path(vis_appoach_vector_label)

                        # Closing-direction rays in both directions from the
                        # center: red = prediction, orange = label.
                        vis_close_v1 = np.hstack((center, center - close_vector / 50)).reshape(-1, 2, 3)
                        vis_close_v2 = np.hstack((center, center + close_vector / 50)).reshape(-1, 2, 3)

                        vis_close_label_v1 = np.hstack((center_label, center_label - close_vector_label / 50)).reshape(-1, 2, 3)
                        vis_close_label_v2 = np.hstack((center_label, center_label + close_vector_label / 50)).reshape(-1, 2, 3)

                        ray_visualize_c1 = trimesh.load_path(vis_close_v1)
                        ray_visualize_c2 = trimesh.load_path(vis_close_v2)

                        ray_visualize_label_c1 = trimesh.load_path(vis_close_label_v1)
                        ray_visualize_label_c2 = trimesh.load_path(vis_close_label_v2)

                        change_ray_color(ray_visualize_appoach_v, [0, 0, 255])
                        change_ray_color(ray_visualize_appoach_v_label, [255, 0, 255])
                        change_ray_color(ray_visualize_c1, [255, 0, 0])
                        change_ray_color(ray_visualize_c2, [255, 0, 0])
                        change_ray_color(ray_visualize_label_c1, [255, 165, 0])
                        change_ray_color(ray_visualize_label_c2, [255, 165, 0])

                        # Gripper wireframes: blue = predicted box, magenta =
                        # label box (label box uses the original label width).
                        box = draw_box(center, R, w = width)

                        width_label = grasp_pose_label[:, -2][red_blue][i]
                        width_label_ori = grasp_pose_label[:, 3][red_blue][i]

                        print('new_width_pred is {} ,new_width_label is {}, ori_width_label is {}'.format(width,  width_label,width_label_ori))

                        box_label = draw_box(center_label, R_label, w = width_label_ori)

                        change_ray_color(box, [0, 0, 255])
                        change_ray_color(box_label, [255, 0, 255])
                        scene2.add_geometry(box)
                        scene2.add_geometry(box_label)
                        scene2.add_geometry(pc_center)
                        scene2.add_geometry(pc_center_label)
                        scene2.add_geometry(ray_visualize_appoach_v)
                        scene2.add_geometry(ray_visualize_appoach_v_label)
                        scene2.add_geometry(ray_visualize_c1)
                        scene2.add_geometry(ray_visualize_c2)
                        scene2.add_geometry(ray_visualize_label_c1)
                        scene2.add_geometry(ray_visualize_label_c2)


                    scene2.add_geometry(pointcloud_blue)
                    scene2.add_geometry(pointcloud_blue_)




                    scene2.add_geometry(ray_visualize_pred_blue_score)
                    # scene2.show()
                    # Blocks here until the viewer window is closed.
                    Application(scene1 , scene2)






