import numpy as np
import torch
import torch.nn as nn
import numpy as np
from PIL import Image
import torch.nn.functional as F
import evalution_segmentaion as es

# def cross_entropy2d(input, target, weight=None, size_average=True):
#     # input: (n, c, h, w), target: (n, h, w)
#     n, c, h, w = input.size()
#     # log_p: (n, c, h, w)
#     log_p = F.log_softmax(input, dim=1)
#
#     # log_p: (n*h*w, c)
#     log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
#     log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
#     log_p = log_p.view(-1, c)
#     # target: (n*h*w,)
#     mask = target >= 0
#     target = target[mask]
#     print(log_p.shape)
#     print(target.shape)
#     print(np.unique(target.numpy()))
#     loss = F.nll_loss(log_p, target, weight=weight, reduction='sum')
#     if size_average:
#         loss /= mask.data.sum()
#     return loss
# The 21 Pascal VOC classes (20 object classes + background),
# index-aligned with `colormap` below.
classes = ['background', 'aeroplane', 'bicycle', 'bird', 'boat',
           'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
           'dog', 'horse', 'motorbike', 'person', 'potted plant',
           'sheep', 'sofa', 'train', 'tv/monitor']

# RGB color for each class (the official VOC label palette).
colormap = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
            [128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0],
            [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128],
            [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0],
            [0, 192, 0], [128, 192, 0], [0, 64, 128]]
print(len(classes))

# Lookup table mapping a packed 24-bit RGB value ((r*256 + g)*256 + b) to
# its class index.  Declared int64 rather than NumPy's float64 default:
# the table only ever stores small integer class ids, and image2label()
# returns int64 labels, so an integer table is the type-correct choice.
cm2lbl = np.zeros(256 ** 3, dtype='int64')
for i, cm in enumerate(colormap):
    cm2lbl[(cm[0] * 256 + cm[1]) * 256 + cm[2]] = i

def image2label(im):
    """Convert a VOC color-coded label image to a 2-D map of class ids.

    im: an RGB image (PIL Image or array-like) of shape (H, W, 3).
    Returns an int64 array of shape (H, W) whose entries index `classes`.
    """
    rgb = np.array(im, dtype='int32')
    # Pack each pixel's (r, g, b) into a single 24-bit key for cm2lbl.
    packed = (rgb[..., 0] * 256 + rgb[..., 1]) * 256 + rgb[..., 2]
    return np.array(cm2lbl[packed], dtype='int64')

def _fast_hist(label_true, label_pred, n_class):
    mask = (label_true >= 0) & (label_true < n_class)
    hist = np.bincount(
        n_class * label_true[mask].astype(int) +
        label_pred[mask], minlength=n_class ** 2).reshape(n_class, n_class)
    return hist


def label_accuracy_score(label_trues, label_preds, n_class):
    """Returns accuracy score evaluation result.

      - overall accuracy
      - mean accuracy (per-class accuracy, averaged over present classes)
      - mean IU (mean intersection-over-union)
      - fwavacc (frequency-weighted IU)

    label_trues / label_preds: iterables of same-shaped integer label maps.
    """
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
    # Classes absent from the ground truth yield 0/0 in the per-class
    # ratios; suppress the RuntimeWarnings and let nanmean skip the
    # resulting NaNs — numerically identical to the unguarded version.
    with np.errstate(divide='ignore', invalid='ignore'):
        acc = np.diag(hist).sum() / hist.sum()
        acc_cls = np.nanmean(np.diag(hist) / hist.sum(axis=1))
        iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
        mean_iu = np.nanmean(iu)
        freq = hist.sum(axis=1) / hist.sum()
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc, acc_cls, mean_iu, fwavacc


# Sanity-check script: run both metric implementations on one image and
# compare against the project's evalution_segmentaion module.
loss_func = nn.CrossEntropyLoss()
# Simulated network output: [batch, channel, H, W] = [1, 21, 281, 500];
# 21 channels = 20 VOC object classes + background.
out = torch.randn((1, 21, 281, 500))
# Ground truth: read a VOC color-coded label image and convert it to a
# (H, W) map of class ids.  NOTE(review): assumes image/test3.png is
# exactly 500x281 so the view below matches `out` — confirm.
target = np.array(Image.open("image/test3.png").convert("RGB"))
target=image2label(target)
print(np.unique(target))
target = torch.LongTensor(target).view(1, 281, 500)

loss = loss_func(out, target)

# Per-pixel predicted class = argmax over the channel dimension, then
# split the batch into a Python list of (H, W) label maps.
pre_label = out.max(dim=1)[1].data.numpy()
pre_label = [i for i in pre_label]
true_label = target.data.numpy()
true_label = [i for i in true_label]

# Project implementation of the metrics, for cross-checking.
eval_metrix = es.eval_semantic_segmentation(pre_label, true_label, 21)
print(eval_metrix)
# Local implementation on the same inputs; the two should agree.
acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(pre_label, true_label, 21)

print(acc)
print(acc_cls)
print(mean_iu)
print(fwavacc)