import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

from utils.evaluation.prediction import Prediction


def fast_hist(a, b, n):
    """Accumulate a confusion matrix from flattened label/prediction arrays.

    a: ground-truth labels flattened to shape (H*W,).
    b: predicted labels flattened to shape (H*W,).
    n: number of classes.
    Returns an (n, n) matrix where entry [i, j] counts pixels whose true
    class is i and predicted class is j; the diagonal holds the correctly
    classified pixels.
    """
    # Keep only pixels whose ground-truth label is a valid class id.
    valid = (a >= 0) & (a < n)
    # Encode each (truth, prediction) pair as one index in [0, n*n), count
    # occurrences of every index, then fold the counts into an n x n matrix.
    pair_index = n * a[valid].astype(int) + b[valid]
    return np.bincount(pair_index, minlength=n ** 2).reshape(n, n)


def per_class_iu(hist):
    """Per-class IoU: TP / (TP + FP + FN) computed from a confusion matrix."""
    true_pos = np.diag(hist)
    union = hist.sum(axis=1) + hist.sum(axis=0) - true_pos
    # Clamp the denominator to 1 so classes absent from both label and
    # prediction yield 0 instead of dividing by zero.
    return true_pos / np.maximum(union, 1)


def per_class_PA(hist):
    """Per-class pixel accuracy: TP / total ground-truth pixels of the class."""
    correct = np.diag(hist)
    # hist.sum(axis=1) is the number of ground-truth pixels per class; clamp
    # to 1 to avoid dividing by zero for classes never seen in the labels.
    return correct / np.maximum(hist.sum(axis=1), 1)


def compute_mIoU(gt_imgs, pred_imgs_name, num_classes, name_classes):
    """Compute and print per-class IoU / pixel accuracy over a validation set.

    gt_imgs: sequence of ground-truth label images (anything np.array accepts).
    pred_imgs_name: sequence of file paths to the predicted label images.
    num_classes: number of segmentation classes.
    name_classes: display name for each class; length num_classes.
    Returns the per-class IoU array.
    """
    print('Num classes', num_classes)

    # -----------------------------------------#
    #   Confusion matrix accumulated over every (image, label) pair.
    # -----------------------------------------#
    hist = np.zeros((num_classes, num_classes))

    for ind in range(len(gt_imgs)):
        # Predicted segmentation result, loaded from disk.
        pred = np.array(Image.open(pred_imgs_name[ind]))
        # Corresponding ground-truth label image.
        label = np.array(gt_imgs[ind])

        # If prediction and label sizes disagree, skip the pair rather than
        # corrupting the statistics.
        # BUGFIX: use '{}' instead of '{:s}' for the last two arguments —
        # gt_imgs entries need not be str, and '{:s}'.format(obj) raises
        # TypeError for non-string objects, crashing the skip branch.
        if len(label.flatten()) != len(pred.flatten()):
            print(
                'Skipping: len(gt) = {:d}, len(pred) = {:d}, {}, {}'.format(
                    len(label.flatten()), len(pred.flatten()), gt_imgs[ind],
                    pred_imgs_name[ind]))
            continue

        # Accumulate this image's num_classes x num_classes confusion matrix.
        hist += fast_hist(label.flatten(), pred.flatten(), num_classes)
        # Every 10 images, report the running mIoU / mPA over what has been
        # processed so far.
        if ind > 0 and ind % 10 == 0:
            print('{:d} / {:d}: mIou-{:0.2f}; mPA-{:0.2f}'.format(
                ind, len(gt_imgs),
                100 * np.nanmean(per_class_iu(hist)),
                100 * np.nanmean(per_class_PA(hist))))

    # ------------------------------------------------#
    #   Final per-class metrics over the whole validation set.
    # ------------------------------------------------#
    mIoUs = per_class_iu(hist)
    mPA = per_class_PA(hist)
    for ind_class in range(num_classes):
        print('===>' + name_classes[ind_class] + ':\tmIou-' +
              str(round(mIoUs[ind_class] * 100, 2)) + '; mPA-' +
              str(round(mPA[ind_class] * 100, 2)))

    # Averages ignore NaN entries when computing the overall scores.
    print('===> mIoU: ' + str(round(np.nanmean(mIoUs) * 100, 2)) +
          '; mPA: ' + str(round(np.nanmean(mPA) * 100, 2)))
    return mIoUs


class miou_Unet(Prediction):
    """Prediction wrapper used to produce label maps for mIoU evaluation."""

    def __init__(self, net, num_classes, model_path, model_image_size=(256, 256, 3),
                 cuda=True, blend=True):
        super().__init__(net=net,
                         model_path=model_path,
                         num_classes=num_classes,
                         model_image_size=model_image_size,
                         cuda=cuda,
                         blend=blend)
        # Mirror the configuration on this instance as well.
        self.model_path = model_path
        self.model_image_size = model_image_size
        self.num_classes = num_classes
        self.cuda = cuda
        self.blend = blend
        self.net = net
        self.generate()

    def detect_image(self, old_img, images, nw, nh):
        # Spatial size of the source image, restored at the end.
        src_h = np.array(old_img).shape[0]
        src_w = np.array(old_img).shape[1]

        with torch.no_grad():
            batch = torch.from_numpy(images).type(torch.FloatTensor)
            if self.cuda:
                batch = batch.cuda()

            logits = self.net(batch)[0]
            # Per-pixel class id (softmax kept for parity with the original;
            # argmax alone would pick the same class).
            class_map = F.softmax(logits.permute(1, 2, 0), dim=-1).cpu().numpy().argmax(axis=-1)
            # Crop away the letterbox padding surrounding the nh x nw region.
            top = int((self.model_image_size[0] - nh) // 2)
            left = int((self.model_image_size[1] - nw) // 2)
            class_map = class_map[top:top + nh, left:left + nw]

        # Nearest-neighbour resize so class ids are never interpolated.
        return Image.fromarray(np.uint8(class_map)).resize((src_w, src_h), Image.NEAREST)
