import torch
import numpy as np
import torch.nn as nn
import torchvision.transforms as transforms
import sys
sys.path.append('../')
import dataset.data_utils as d_utils
from dataset.BindatasetLoader import Bindataset
from models.pointnet2_ssg_sem import PointNet2SemSegSSG
from models.pointnet2_msg_sem import PointNet2SemSegMSG
#from models.densepoint_cls_L6_k24_g2 import DensePoint
from common.utils.config import load_yaml_to_AttrDict, merge_cfg_into_cfg
from common.lib.metric import AverageMeter
import trimesh
import yaml
import os

# Pin this evaluation process to the first GPU only.
os.environ['CUDA_VISIBLE_DEVICES']= '0'
def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k.

    Args:
        output: per-point class scores of shape (B, C, N); the channel
            dimension is moved last and flattened so each point is scored
            as one sample.
        target: integer class labels of shape (B, N), flattened to (B*N,).
        topk: tuple of k values for which to report top-k accuracy.

    Returns:
        (res, pred): ``res`` is a list with one percentage tensor per k;
        ``pred`` is the (maxk, B*N) tensor of predicted class indices.
    """
    with torch.no_grad():
        # (B, C, N) -> (B*N, C): one row of class scores per point.
        output = output.transpose(1, 2)
        output = output.contiguous().view(-1, output.size(-1))
        target = target.view(-1)
        maxk = max(topk)
        batch_size = target.size(0)
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))
        res = []
        for k in topk:
            # reshape, not view: the slice can be non-contiguous after t(),
            # and view() raises on non-contiguous input in modern PyTorch.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res, pred
# ---- Evaluation setup -------------------------------------------------
batch_size = 64

# Layer the experiment-specific settings on top of the base configuration.
config = merge_cfg_into_cfg(
    '../experiments/local_msg_classification_config.yaml',
    '../experiments/base_config.yaml',
)

# Validation split; a tiny jitter is applied after the to-tensor conversion.
eval_transforms = transforms.Compose([
    d_utils.PointcloudToTensor(),
    d_utils.PointcloudJitter(std=0.0001, clip=0.0001),
])
val_dataset = Bindataset(
    dataset_dir='/home/v-wewei/code/two_stage_pointnet/mask_label/',
    num_points=config.num_points,
    transforms=eval_transforms,
    train=False,
    platform=config.platform,
)
val_loader = torch.utils.data.DataLoader(
    val_dataset,
    batch_size=batch_size,
    shuffle=True,
    drop_last=False,
    num_workers=12 if batch_size > 1 else 1,
    pin_memory=True,
)

# config.model names one of the imported model classes (e.g.
# PointNet2SemSegMSG); eval() resolves that name and instantiates it.
# NOTE(review): eval() on a config string is unsafe if the config file
# could ever be untrusted.
model = eval(config.model)(config)
# Evaluate every saved checkpoint (epochs 0-14) on the validation set and
# report the mean per-sample IoU and recall of the positive (label == 1)
# point class.
for epoch in range(0, 15):
    pretrained_dict = torch.load('/home/v-wewei/code/two_stage_pointnet/checkpoints/test_CrossEntropyLoss_{:03d}_0.pth.tar'.format(epoch), map_location='cpu')
    model.load_state_dict(pretrained_dict['state_dict'], strict=True)

    model.cuda()
    model.eval()
    ratio = AverageMeter()   # running mean IoU over samples
    recall = AverageMeter()  # running mean recall over samples
    with torch.no_grad():
        # NOTE: the loop variable no longer shadows the checkpoint index
        # (the original reused `i` for both loops).
        for batch in val_loader:
            data, label, dataset_idx = batch
            data = data.cuda(non_blocking=True)
            # assumes label[0] holds the per-point mask labels — TODO confirm
            # against Bindataset.__getitem__.
            cls = label[0].cuda(non_blocking=True)
            output = model(data)
            # Top-1 class per point; the accuracy percentages are unused here,
            # only the predicted indices matter.
            res, pred = accuracy(output, cls, topk=(1,))
            # Keep both arrays 2-D (B, num_points): the original squeezed
            # them, which broke the per-sample loop whenever the final
            # batch had size 1 (possible since drop_last=False).
            pred = pred.detach().cpu().numpy().reshape(-1, config.num_points)
            cls = cls.detach().cpu().numpy().reshape(-1, config.num_points)

            # Per-sample IoU / recall computed on the positive-class indices.
            for pred_, cls_ in zip(pred, cls):
                pred_pos = set(np.where(pred_ == 1)[0])
                true_pos = set(np.where(cls_ == 1)[0])
                union = pred_pos | true_pos
                inter = pred_pos & true_pos
                # Empty union / empty ground truth are scored 0 (original
                # convention, preserved).
                ratio_ = len(inter) / len(union) if union else 0
                ratio_recall = len(inter) / len(true_pos) if true_pos else 0
                ratio.update(ratio_, 1)
                recall.update(ratio_recall, 1)
        print('IOU is : ', ratio.avg)
        print('Recall is : ', recall.avg)
