import torch
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import sys
import transforms3d as t3d
import open3d as o3d

sys.path.append('../')
import dataset.data_utils as d_utils
from dataset.BindatasetLoader import Bindataset
from models.two_stage_graspnet import Two_Stage_GraspNet
from models.pointnet2_msg_sem import PointNet2SemSegMSG
from models.roi_head import RoI_Head
# from common.utils.config import merge_cfg_into_cfg
from common.lib.metric import AverageMeter
from common.lib.bbox import decode_bbox_target, angle_to_vector, grasp_angle_to_vector, rotation_from_vector
from utils.pointnet2_utils import furthest_point_sample, gather_operation
import trimesh
import yaml
import os
import trimesh.viewer
import pyglet
import glooey
import torch.backends.cudnn as cudnn
import random

# ---------------------------------------------------------------------------
# Reproducibility / device setup: pin the run to GPU 0 and seed every RNG the
# pipeline touches (Python, NumPy, PyTorch CPU and CUDA).
# ---------------------------------------------------------------------------
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
random.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
cudnn.benchmark = False
# Bug fix: the attribute was misspelled "determinstic", which merely created a
# new unused attribute on the cudnn module and left cuDNN non-deterministic.
cudnn.deterministic = True


class Application:
    """
    Example application that includes moving camera, scene and image update.

    Two trimesh scenes are displayed side by side in one pyglet window; the
    constructor blocks inside ``pyglet.app.run()`` until the window closes.
    """

    def __init__(self, scene_1, scene_2):
        # Window sized for two 920-px-wide viewports next to each other.
        self.width = 920 * 2
        self.height = 960
        window = self._create_window(width=self.width, height=self.height)

        gui = glooey.Gui(window)

        # Horizontal layout holding one SceneWidget per scene.
        row = glooey.HBox()
        row.set_padding(5)

        # Widget for changing the camera location.
        self.scene_widget1 = trimesh.viewer.SceneWidget(scene_1)
        # Widget for changing the scene contents.
        self.scene_widget2 = trimesh.viewer.SceneWidget(scene_2)
        row.add(self.scene_widget1)
        row.add(self.scene_widget2)

        gui.add(row)

        # Enter the pyglet event loop; returns only when the window closes.
        pyglet.app.run()

    def _create_window(self, width, height):
        """Create a pyglet window, preferring an antialiased GL config."""
        try:
            gl_config = pyglet.gl.Config(sample_buffers=1,
                                         samples=4,
                                         depth_size=24,
                                         double_buffer=True)
            window = pyglet.window.Window(config=gl_config,
                                          width=width,
                                          height=height)
        except pyglet.window.NoSuchConfigException:
            # Fall back to a plain double-buffered context.
            gl_config = pyglet.gl.Config(double_buffer=True)
            window = pyglet.window.Window(config=gl_config,
                                          width=width,
                                          height=height)

        @window.event
        def on_key_press(symbol, modifiers):
            # Unmodified 'Q' closes the window.
            if modifiers == 0 and symbol == pyglet.window.key.Q:
                window.close()

        return window


def accuracy(output, target, topk=(1,)):
    """Compute top-k accuracy for dense (per-point) classification.

    Args:
        output: logits of shape (B, C, N) — class scores for each point.
        target: ground-truth labels flattening to B*N entries.
        topk: iterable of k values to report.

    Returns:
        (res, pred): ``res`` is a list of 1-element tensors, one per k,
        holding the accuracy in percent; ``pred`` is the (maxk, B*N) tensor
        of top-k predicted class indices.
    """
    with torch.no_grad():
        # (B, C, N) -> (B*N, C): one score row per point.
        output = output.transpose(1, 2)
        output = output.contiguous().view(-1, output.size(-1))
        target = target.view(-1)
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # Bug fix: ``correct[:k].view(-1)`` can raise a RuntimeError on
            # recent PyTorch because the comparison of a transposed tensor
            # against an expanded one may yield a non-contiguous result;
            # ``reshape`` handles both layouts.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res, pred


# 0.041  0.04  0.018
def draw_box(center, R, l=0.041, w=0.04, h=0.018):
    """Build a trimesh line path outlining a batch of gripper boxes.

    Args:
        center: (N, 3) array of box reference points.
        R: (N, 3, 3) array of rotations whose columns are the box axes.
        l, w, h: box length / width / height; the box extends 0.004 ahead
            of the reference point along local +x and (l - 0.004) behind it.

    Returns:
        A trimesh path made of 12 line segments per box.
    """
    front, back = 0.004, -(l - 0.004)
    # Eight corners in the local box frame, as columns of a (3, 8) matrix.
    xs = [front, front, back, back, front, front, back, back]
    ys = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
    zs = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2]
    local_corners = np.vstack([xs, ys, zs])

    # Rotate into the world frame and translate: (N, 3, 8) -> (N, 8, 3).
    world_corners = np.dot(R, local_corners) + np.expand_dims(center, -1)
    world_corners = world_corners.transpose(0, 2, 1)

    # Each edge starts at one of four anchor corners (the same four reused
    # for all three axis directions) and runs along a scaled box axis.
    starts = np.array([world_corners[:, 0], world_corners[:, 2],
                       world_corners[:, 5], world_corners[:, 7]] * 3)
    x_axis = R[:, :, 0]
    y_axis = R[:, :, 1]
    z_axis = R[:, :, 2]
    deltas = np.array(
        [-w * y_axis, w * y_axis, w * y_axis, -w * y_axis,
         -l * x_axis, l * x_axis, -l * x_axis, l * x_axis,
         -h * z_axis, -h * z_axis, h * z_axis, h * z_axis])

    # Pack (start, end) pairs into the (M, 2, 3) layout load_path expects.
    segments = np.hstack((starts.reshape(-1, 3),
                          (starts + deltas).reshape(-1, 3))).reshape(-1, 2, 3)
    return trimesh.load_path(segments)
    # path.show()
    # exit()


# ---------------------------------------------------------------------------
# Script configuration: batch size, dataset split, and the merged YAML config.
# ---------------------------------------------------------------------------
b = 1  # batch size for the validation loader
mode = 'validate'
# mode = 'train'
from common.utils.config_ import cfg, cfg_from_yaml_file, merge_new_config_file

# Load the base config, then overlay the experiment-specific settings on top.
config = cfg_from_yaml_file('../experiments/base_config.yaml', cfg)
config = merge_new_config_file(config, '../experiments/firststage_only_msg_classification_config.yaml')
# config = merge_cfg_into_cfg('../experiments/local_msg_classification_config.yaml', '../experiments/base_config.yaml')
# With uniform sampling the cloud is over-sampled here; the eval loop below
# later reduces it back to num_points / sample_rate via furthest-point
# sampling.
if not config.non_uniform_sampling:
    config.num_points *= config.sample_rate
val_dataset = Bindataset(
    dataset_dir='../mask_label/',
    # dataset_dir='/home/v-wewei/VV_grasp_dataset/',
    num_points=int(config.num_points),
    transforms=transforms.Compose([
        d_utils.PointcloudToTensor(),
        # d_utils.PointcloudJitter(std=0.0001, clip=0.0001),
    ]),
    mode=mode,
    use_normal=config.use_normal,
    platform=config.platform,
    # non_uniform_sampling=False,
    aug_scene=False
)

# Deterministic iteration (no shuffle, no drop) so dataset indices are
# reproducible; worker processes only pay off when batching more than one
# sample.
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=b, shuffle=False,
                                         drop_last=False, num_workers=11 if b > 1 else 0, pin_memory=True)

# ---------------------------------------------------------------------------
# Checkpoint evaluation / visualisation loop: load each checkpoint, run the
# network over the validation set, and render ground-truth vs. predicted
# graspable points with trimesh / Open3D.
#
# NOTE(review): ``eval(config.model)`` executes a string taken from the YAML
# config. Acceptable for a trusted local experiment file, but never point it
# at untrusted input.
# ---------------------------------------------------------------------------
model = eval(config.model)(config)
# model = PointNet2SemSegMSG(config)
for ckpt_idx in range(29, 30):
    # Load the checkpoint for epoch ``ckpt_idx`` (weights saved by rank 0).
    pretrained_dict = torch.load(
        '/home/lifuyu/data_disk/v-wewei_data/checkpoints/test_two_stage_loss_{:03d}_0.pth.tar'.format(ckpt_idx),
        map_location='cpu')
    model.load_state_dict(pretrained_dict['state_dict'], strict=True)

    model.cuda()
    model.eval()
    ratio = AverageMeter()
    recall = AverageMeter()
    with torch.no_grad():
        flag = 0
        for batch_idx, batch in enumerate(val_loader):
            data, label, dataset_idx = batch
            print('dataset_idx is : ', dataset_idx)
            data = data.cuda(non_blocking=True)
            # label = (per-point class, grasp score, grasp pose
            #          [, rotation matrix when training the second stage]).
            cls_label = label[0].cuda(non_blocking=True)
            score_label = label[1].cuda(non_blocking=True)
            pose_label = label[2].cuda(non_blocking=True)
            if config.train_two_stage:
                matrix_label = label[3].cuda(non_blocking=True)

            if not config.non_uniform_sampling:
                print('non uniform_sampling')
                # Reduce the over-sampled cloud back to
                # num_points / sample_rate points with furthest-point
                # sampling, gathering the per-point labels to match.
                idx = furthest_point_sample(data, int(config.num_points / config.sample_rate))
                data = gather_operation(
                    data.transpose(1, 2).contiguous(),
                    idx).transpose(1, 2).contiguous()
                cls_label = gather_operation(
                    cls_label.float().unsqueeze(dim=2).transpose(1, 2).contiguous(),
                    idx).squeeze().long().contiguous()
                pose_label = gather_operation(
                    pose_label.transpose(1, 2).contiguous(),
                    idx).transpose(1, 2).contiguous()
            # Foreground (graspable) point masks.
            fg_mask = (cls_label.view(-1) > 0)
            fg_mask_ = (cls_label.squeeze() > 0).long()
            if config.train_two_stage:
                kwargs = {'cls_label': cls_label, 'score_label': score_label, 'pose_label': pose_label,
                          'matrix_label': matrix_label, 'mode': 'validate'}
            else:
                kwargs = {'cls_label': cls_label, 'score_label': score_label, 'pose_label': pose_label,
                          'mode': 'validate'}

            batch_dict = model(data, **kwargs)

            data_ = data.detach().cpu().numpy().squeeze()

            cls_label = cls_label.detach().cpu().numpy().squeeze()
            cls_pred = batch_dict['batch_cls_preds'].detach().cpu().numpy().squeeze()
            pose_pred = batch_dict['batch_grasp_preds']
            pose_pred2 = batch_dict['new_batch_grasp_preds']
            pose_pred3 = batch_dict['gt_grasps']
            if config.train_two_stage:
                # Column 9 of the 13-dim grasp vector holds gripper width
                # (column 6 in the 7-dim pose label) — TODO confirm layout.
                rois_width = batch_dict['rois'].detach().cpu().numpy().squeeze()[:, 9]
                new_grasp_width = batch_dict['new_batch_grasp_preds'].detach().cpu().numpy().squeeze()[:, 9]
                gt_width = pose_label.detach().cpu().numpy().squeeze()[:, 6]
                selected_index = batch_dict['selected'].cpu().numpy()
                non_zero_index = np.where(gt_width[selected_index] > 0)

            show = True
            if show:
                # NOTE(review): ``red``/``red_``/``pose_label`` below are only
                # defined in this branch; later code uses them
                # unconditionally, so other modes would raise NameError.
                if mode in ['train', 'validate']:
                    # Ground truth: red = graspable points, green = rest.
                    red = np.where(cls_label == 1)
                    red_ = np.where(cls_label != 1)
                    pointcloud_red = trimesh.PointCloud(data_[red], colors=[255, 0, 0])
                    pointcloud_red_ = trimesh.PointCloud(data_[red_], colors=[0, 255, 0])
                    pose_label = pose_label.detach().cpu().numpy().squeeze()

                # Prediction: blue = points above the segmentation threshold.
                blue = np.where(cls_pred >= config.seg_thresh)
                blue_ = np.where(cls_pred < config.seg_thresh)
                pointcloud_blue = trimesh.PointCloud(data_[blue], colors=[0, 0, 255])
                pointcloud_blue_ = trimesh.PointCloud(data_[blue_], colors=[0, 255, 0])

                point_cloud = trimesh.PointCloud(data_, colors=[0, 255, 0])
                selected_box_inds = batch_dict['selected'].detach().cpu().numpy()
                # Points pooled inside the first RoI.
                selected_points = batch_dict['selected_points'].squeeze()[0].detach().cpu().numpy()
                ori_points = batch_dict['ori_points'].squeeze().detach().cpu().numpy()
                pointcloud_selected_points = trimesh.PointCloud(selected_points, colors=[255, 0, 0])
                pointcloud_ori = trimesh.PointCloud(ori_points, colors=[0, 255, 0])
                # Rotation entries live in columns [3..8, 10..12] of the
                # 13-dim grasp vector; transpose recovers the matrix.
                R = pose_pred[:, :, [3, 4, 5, 6, 7, 8, 10, 11, 12]].view(
                    -1, 3, 3).transpose(-1, -2).detach().cpu().numpy()[selected_box_inds]
                R2 = pose_pred2[:, :, [3, 4, 5, 6, 7, 8, 10, 11, 12]].view(
                    -1, 3, 3).transpose(-1, -2).detach().cpu().numpy()
                # NOTE(review): matrix_label only exists when
                # config.train_two_stage is set; this raises NameError
                # otherwise — confirm intended usage.
                R3 = matrix_label.detach().cpu().numpy().squeeze()[selected_box_inds]
                pose_pred = pose_pred.detach().cpu().numpy().squeeze()
                pose_pred2 = pose_pred2.detach().cpu().numpy().squeeze()
                pose_pred3 = pose_pred3.detach().cpu().numpy().squeeze()
                stage1_center = pose_pred[selected_box_inds][:, :3]
                stage2_center = pose_pred2[:, :3]
                # Pose centres are stored as offsets from their seed point.
                gt_center = pose_label[selected_box_inds, :3] + data_[selected_box_inds]
                pred_width = pose_pred[:, 9]
                stage1_w = pred_width[selected_box_inds]

                new_grasp_width = batch_dict['new_batch_grasp_preds'].detach().cpu().numpy().squeeze()[:, 9]
                new_label = torch.sigmoid(batch_dict['new_batch_cls_preds']).detach().cpu().numpy().squeeze()

                # --- Open3D clouds, cropped to the bin window --------------
                pcd_all = o3d.PointCloud()
                pcd_all_ori = o3d.PointCloud()
                pcd_red = o3d.PointCloud()
                pcd_red_ = o3d.PointCloud()

                pcd_red.points = o3d.Vector3dVector(data_[red])
                x = data_[red_][:, 0]
                y = data_[red_][:, 1]
                z = data_[red_][:, 2]
                index_window_x = np.where((x < 0.168) & (x > -0.168))
                index_window_y = np.where((y < 0.165) & (y > -0.165))
                index_window_z = np.where(z > 0.02)
                # Bug fix: np.intersect1d intersects only two arrays — the
                # third positional argument is ``assume_unique``, so the z
                # filter used to be silently ignored. Chain the calls so all
                # three window constraints apply.
                index_window = np.intersect1d(
                    np.intersect1d(index_window_x, index_window_y), index_window_z)
                point_data_window = data_[red_][index_window]
                pcd_red_.points = o3d.Vector3dVector(data_[red_][index_window])

                x1 = data_[:, 0]
                y1 = data_[:, 1]
                z1 = data_[:, 2]
                index_window_x1 = np.where((x1 < 0.168) & (x1 > -0.168))
                index_window_y1 = np.where((y1 < 0.165) & (y1 > -0.165))
                index_window_z1 = np.where(z1 > 0.02)
                # Same fix as above for the uncropped cloud.
                index_window1 = np.intersect1d(
                    np.intersect1d(index_window_x1, index_window_y1), index_window_z1)
                point_data_window_ori = data_[index_window1]

                pcd_red.paint_uniform_color([1, 0, 0])
                # pcd_red_.paint_uniform_color([70/255, 70/255, 70/255])
                pcd_red_.paint_uniform_color([90 / 255, 166 / 255, 119 / 255])
                pcd_all.points = o3d.Vector3dVector(point_data_window)
                pcd_all.paint_uniform_color([150 / 255, 150 / 255, 150 / 255])
                # o3d.draw_geometries([pcd_all, pcd_red])
                pcd_all_ori.points = o3d.Vector3dVector(data_[index_window1])
                pcd_all_ori.paint_uniform_color([70 / 255, 70 / 255, 70 / 255])
                # o3d.write_point_cloud("../pcd_all.ply", pcd_all)

                # --- trimesh visualisation ---------------------------------
                scene_p = trimesh.Scene()
                scene = trimesh.Scene()
                scene1 = trimesh.Scene()
                # ``red``/``red_`` are rebound from index arrays to clouds
                # here; the RHS still reads the original index arrays.
                red = trimesh.PointCloud(data_[red], colors=[255, 0, 0])
                red_ = trimesh.PointCloud(data_[red_][index_window], colors=[90 / 255, 166 / 255, 119 / 255])
                pc_ori = trimesh.PointCloud(data_[index_window1], colors=[80 / 255, 80 / 255, 80 / 255])
                scene_p.add_geometry(red)
                scene_p.add_geometry(red_)
                trimesh.viewer.SceneViewer(scene_p, line_settings={'point_size': 5})
                scene_p.show()
                scene_t = trimesh.Scene()
                scene_t.add_geometry(pointcloud_red)
                scene_t.add_geometry(pointcloud_red_)
                # Application(scene_p, scene_t)



#                    #path_1 = draw_box(pose_pred[selected_box_inds][sampled_box_inds][:1], R[selected_box_inds][sampled_box_inds][:1], w=targets_dict['rois'][0, 0, 9].detach().cpu().numpy())
#                    #print(pose_pred[selected_box_inds][sampled_box_inds][:1])
#                    #path_2 = draw_box(np.zeros((1,3)), R=np.identity(3).reshape(-1, 3, 3))
#                    center = pose_label[fg_mask.cpu().numpy()][:, :3]+data_[fg_mask.cpu().numpy()]
#                    print(center.shape)
#                    center_vis = trimesh.PointCloud(center, colors=[255, 255, 0])
#                    center_pred_vis = trimesh.PointCloud(pose_pred[fg_mask.cpu().numpy()][:, :3].squeeze(), colors=[0, 255, 255])
#                    #ray_origins = data_[fg_mask.cpu().numpy()]
#                    #ray_directions = closing_vector_label.cpu().numpy()
#                    #ray_visualize = trimesh.load_path(np.hstack((ray_origins, ray_origins+ray_directions / 50)).reshape(-1, 2, 3))
#
#                    scene_1 = trimesh.Scene()
#                    scene_1.add_geometry(center_vis)
#                    scene_1.add_geometry(center_pred_vis)
#                    #scene_1.add_geometry(path_1)
#                    #scene_1.add_geometry(ray_visualize)
#                   # scene_1.add_geometry(path_2)
#                    scene_1.add_geometry(pointcloud_red_)
#                    scene_1.add_geometry(pointcloud_red)
#                    #scene_1.add_geometry(pointcloud_selected_points)
#                    Application(scene, scene_1)
#                    #scene_1.show()
#                #print(targets_dict.keys())
# except:
#  else:
#      pass


