from typing import Tuple, Any, Union

import torch
import numpy as np
import torch.nn as nn
from numpy import ndarray
from sklearn import metrics
from torch.autograd import Function
from scipy.ndimage import convolve, distance_transform_edt as bwdist
import numba

# Machine epsilon, used to keep denominators in the metric formulas non-zero.
_EPS = np.spacing(1)
# Per-image scores are accumulated and averaged in float64 for numerical stability.
_TYPE = np.float64

@numba.jit(nopython=True)
def _prepare_data(pred: np.ndarray, gt: np.ndarray) -> tuple:
    """Binarize the GT map at 128 and min-max rescale the prediction into [0, 1]."""
    gt = gt > 128
    pred = pred / 255
    hi, lo = pred.max(), pred.min()
    if hi != lo:
        pred = (pred - lo) / (hi - lo)
    return pred, gt

@numba.jit(nopython=True)
def _get_adaptive_threshold(matrix: np.ndarray, max_value: float = 1) -> float:
    """Return twice the mean of ``matrix``, capped from above at ``max_value``."""
    doubled_mean = 2 * matrix.mean()
    return doubled_mean if doubled_mean < max_value else max_value


class Fmeasure(object):
    """Accumulate adaptive and threshold-sweep F-measure statistics over a dataset."""

    def __init__(self, beta: float = 0.3):
        self.beta = beta  # precision weight in the F-measure formula
        self.precisions = []
        self.recalls = []
        self.adaptive_fms = []
        self.changeable_fms = []

    def step(self, pred: np.ndarray, gt: np.ndarray):
        """Record all F-measure statistics for one prediction / ground-truth pair."""
        pred, gt = _prepare_data(pred, gt)
        self.adaptive_fms.append(self.cal_adaptive_fm(pred=pred, gt=gt))
        prec, rec, fms = self.cal_pr(pred=pred, gt=gt)
        self.precisions.append(prec)
        self.recalls.append(rec)
        self.changeable_fms.append(fms)

    def cal_adaptive_fm(self, pred: np.ndarray, gt: np.ndarray) -> float:
        """F-measure at a threshold of twice the prediction's mean (capped at 1)."""
        threshold = _get_adaptive_threshold(pred, max_value=1)
        binary_prediction = pred >= threshold
        overlap = binary_prediction[gt].sum()
        if overlap == 0:
            return 0
        precision = overlap / np.count_nonzero(binary_prediction)
        recall = overlap / np.count_nonzero(gt)
        return (1 + self.beta) * precision * recall / (self.beta * precision + recall)

    def cal_pr(self, pred: np.ndarray, gt: np.ndarray) -> tuple:
        """Precision / recall / F-measure curves over all 256 uint8 thresholds.

        Index 0 of each returned array corresponds to the highest threshold
        (only pixels with value 255 counted as foreground).
        """
        pred = (pred * 255).astype(np.uint8)
        bins = np.linspace(0, 256, 257)
        fg_hist, _ = np.histogram(pred[gt], bins=bins)
        bg_hist, _ = np.histogram(pred[~gt], bins=bins)
        # Cumulative counts of predicted-foreground pixels as the threshold drops.
        tps = np.cumsum(np.flip(fg_hist), axis=0)
        fps = np.cumsum(np.flip(bg_hist), axis=0)
        positives = tps + fps
        positives[positives == 0] = 1  # avoid 0/0 where nothing is predicted
        gt_total = max(np.count_nonzero(gt), 1)
        precisions = tps / positives
        recalls = tps / gt_total
        numerator = (1 + self.beta) * precisions * recalls
        denominator = np.where(numerator == 0, 1, self.beta * precisions + recalls)
        return precisions, recalls, numerator / denominator

    def get_results(self) -> dict:
        """Average the accumulated statistics over all processed images."""
        adaptive_fm = np.mean(np.array(self.adaptive_fms, _TYPE))
        changeable_fm = np.mean(np.array(self.changeable_fms, dtype=_TYPE), axis=0)
        precision = np.mean(np.array(self.precisions, dtype=_TYPE), axis=0)  # N, 256
        recall = np.mean(np.array(self.recalls, dtype=_TYPE), axis=0)  # N, 256
        return dict(fm=dict(adp=adaptive_fm, curve=changeable_fm),
                    pr=dict(p=precision, r=recall))



"""=============================== MAE ==============================="""
@numba.jit(nopython=True)
def _cal_mae(pred: np.ndarray, gt: np.ndarray) -> ndarray:
    """Mean absolute difference between prediction and ground truth."""
    return np.mean(np.abs(pred - gt))

class MAE(object):
    """Accumulate the mean absolute error over a dataset."""

    def __init__(self):
        self.maes = []

    def step(self, pred: np.ndarray, gt: np.ndarray):
        """Record the MAE of one pair.

        NOTE(review): unlike Fmeasure.step, inputs are used as-is here
        (the `_prepare_data` normalization is intentionally skipped).
        """
        self.maes.append(self.cal_mae(pred, gt))

    def cal_mae(self, pred: np.ndarray, gt: np.ndarray) -> ndarray:
        """Delegate to the numba-compiled MAE kernel."""
        return _cal_mae(pred, gt)

    def get_results(self) -> dict:
        """Average of the per-image MAE values."""
        return dict(mae=np.mean(np.array(self.maes, _TYPE)))


"""=============================== Smeasure ==============================="""
@numba.jit(nopython=True)
def _object(pred: np.ndarray, gt: np.ndarray) -> float:
    """Object-aware similarity: FG and BG scores blended by the GT foreground ratio."""
    foreground = pred * gt
    background = (1 - pred) * (1 - gt)
    fg_ratio = np.mean(gt)
    return fg_ratio * _s_object(foreground, gt) + (1 - fg_ratio) * _s_object(background, 1 - gt)

@numba.jit(nopython=True)
def _s_object(pred: np.ndarray, gt: np.ndarray) -> float:
    """Similarity score of the predicted values inside the GT region."""
    pred_flat = pred.ravel()
    gt_flat = gt.ravel()
    region = pred_flat[gt_flat == 1]
    mean_val = np.mean(region)
    # np.std(..., ddof=1) is not supported in nopython mode, hence the helper.
    sigma = _ddof_std(region)
    return 2 * mean_val / (np.power(mean_val, 2) + 1 + sigma + _EPS)

@numba.jit(nopython=True)
def _ddof_std(x: np.ndarray) -> float:
    """Sample standard deviation (ddof=1), written out explicitly for numba."""
    squared_dev = np.abs(x - np.mean(x)) ** 2
    n = len(squared_dev)
    return np.sqrt(np.sum(squared_dev) / (n - 1))

@numba.jit(nopython=True)
def _centroid(matrix: np.ndarray) -> tuple:
    """Centroid of ``matrix`` as 1-based (x, y); falls back to the image center when empty."""
    h, w = matrix.shape
    total = matrix.sum()
    if total == 0:
        cx = np.round(w / 2)
        cy = np.round(h / 2)
    else:
        col_ids = np.arange(w)
        row_ids = np.arange(h)
        # Intensity-weighted mean column / row index.
        cx = np.round(np.sum(np.sum(matrix, axis=0) * col_ids) / total)
        cy = np.round(np.sum(np.sum(matrix, axis=1) * row_ids) / total)
    return int(cx) + 1, int(cy) + 1

@numba.jit(nopython=True)
def _ssim(pred: np.ndarray, gt: np.ndarray) -> float:
    """Single-window SSIM-style score between prediction and GT patches."""
    h, w = pred.shape
    n = h * w

    mean_p = np.mean(pred)
    mean_g = np.mean(gt)

    # Sample (ddof=1) variances and covariance.
    var_p = np.sum((pred - mean_p) ** 2) / (n - 1)
    var_g = np.sum((gt - mean_g) ** 2) / (n - 1)
    cov = np.sum((pred - mean_p) * (gt - mean_g)) / (n - 1)

    alpha = 4 * mean_p * mean_g * cov
    beta = (mean_p ** 2 + mean_g ** 2) * (var_p + var_g)

    if alpha != 0:
        return alpha / (beta + _EPS)
    if beta == 0:
        # Both patches are constant: treat as a perfect match.
        return 1
    return 0

@numba.jit(nopython=True)
def _divide_with_xy(pred: np.ndarray, gt: np.ndarray, x, y) -> tuple:
    """Split pred and gt into four quadrants at (x, y) and compute the area weights.

    Returns (gt quadrants, pred quadrants, weights), each ordered
    left-top, right-top, left-bottom, right-bottom.
    """
    h, w = gt.shape
    area = h * w

    gt_quads = (gt[0:y, 0:x], gt[0:y, x:w], gt[y:h, 0:x], gt[y:h, x:w])
    pred_quads = (pred[0:y, 0:x], pred[0:y, x:w], pred[y:h, 0:x], pred[y:h, x:w])

    w1 = x * y / area
    w2 = y * (w - x) / area
    w3 = (h - y) * x / area
    w4 = 1 - w1 - w2 - w3  # remainder, so the four weights always sum to 1

    return gt_quads, pred_quads, (w1, w2, w3, w4)

@numba.jit(nopython=True)
def _region(pred: np.ndarray, gt: np.ndarray) -> float:
    """Region-aware similarity: area-weighted SSIM over the GT-centroid quadrants."""
    x, y = _centroid(gt)
    gt_quads, pred_quads, weights = _divide_with_xy(pred, gt, x, y)
    w1, w2, w3, w4 = weights
    s1 = _ssim(pred_quads[0], gt_quads[0])
    s2 = _ssim(pred_quads[1], gt_quads[1])
    s3 = _ssim(pred_quads[2], gt_quads[2])
    s4 = _ssim(pred_quads[3], gt_quads[3])
    return w1 * s1 + w2 * s2 + w3 * s3 + w4 * s4
class Smeasure(object):
    """Structure-measure (S-measure): alpha-blend of object- and region-aware scores."""

    def __init__(self, alpha: float = 0.5):
        self.sms = []
        self.alpha = alpha  # trade-off between the object-aware and region-aware terms

    def step(self, pred: np.ndarray, gt: np.ndarray):
        """Record the S-measure of one pair (inputs are used as-is, not normalized)."""
        self.sms.append(self.cal_sm(pred, gt))

    def cal_sm(self, pred: np.ndarray, gt: np.ndarray) -> float:
        """S-measure with shortcuts for all-background / all-foreground GT."""
        gt_mean = np.mean(gt)
        if gt_mean == 0:  # GT entirely background: reward low predictions
            return 1 - np.mean(pred)
        if gt_mean == 1:  # GT entirely foreground: reward high predictions
            return np.mean(pred)
        blended = self.alpha * _object(pred, gt) + (1 - self.alpha) * _region(pred, gt)
        return max(0, blended)

    def get_results(self) -> dict:
        """Average S-measure over all processed images."""
        return dict(sm=np.mean(np.array(self.sms, dtype=_TYPE)))


class Emeasure(object):
    """Enhanced-alignment measure (E-measure) accumulated over a dataset.

    For every image, `step` records an adaptive-threshold score and a curve
    of scores over all 256 uint8 thresholds; `get_results` averages both
    across all processed images.
    """

    def __init__(self):
        # Per-image adaptive scores (floats) and per-image length-256 curves.
        self.adaptive_ems = []
        self.changeable_ems = []

    def step(self, pred: np.ndarray, gt: np.ndarray):
        """Record both E-measure variants for one prediction/GT pair.

        Side effect: caches the GT foreground pixel count and image size on
        the instance; every `cal_*` helper below relies on these attributes.
        """
        pred, gt = _prepare_data(pred=pred, gt=gt)
        self.gt_fg_numel = np.count_nonzero(gt)
        self.gt_size = gt.shape[0] * gt.shape[1]

        changeable_ems = self.cal_changeable_em(pred, gt)
        self.changeable_ems.append(changeable_ems)
        adaptive_em = self.cal_adaptive_em(pred, gt)
        self.adaptive_ems.append(adaptive_em)

    def cal_adaptive_em(self, pred: np.ndarray, gt: np.ndarray) -> float:
        """E-measure at a threshold of twice the prediction mean (capped at 1)."""
        adaptive_threshold = _get_adaptive_threshold(pred, max_value=1)
        adaptive_em = self.cal_em_with_threshold(pred, gt, threshold=adaptive_threshold)
        return adaptive_em

    def cal_changeable_em(self, pred: np.ndarray, gt: np.ndarray) -> np.ndarray:
        """E-measure curve over all 256 uint8 thresholds (length-256 array)."""
        changeable_ems = self.cal_em_with_cumsumhistogram(pred, gt)
        return changeable_ems

    def cal_em_with_threshold(self, pred: np.ndarray, gt: np.ndarray, threshold: float) -> float:
        """E-measure of the prediction binarized at a single threshold.

        Requires `step` to have cached `gt_fg_numel` and `gt_size` first.
        """
        binarized_pred = pred >= threshold
        # Pixel counts of predicted-foreground pixels split by GT label.
        fg_fg_numel = np.count_nonzero(binarized_pred & gt)
        fg_bg_numel = np.count_nonzero(binarized_pred & ~gt)

        fg___numel = fg_fg_numel + fg_bg_numel
        bg___numel = self.gt_size - fg___numel

        if self.gt_fg_numel == 0:
            # Degenerate GT (all background): only predicted-background pixels score.
            enhanced_matrix_sum = bg___numel
        elif self.gt_fg_numel == self.gt_size:
            # Degenerate GT (all foreground): only predicted-foreground pixels score.
            enhanced_matrix_sum = fg___numel
        else:
            parts_numel, combinations = self.generate_parts_numel_combinations(
                fg_fg_numel=fg_fg_numel, fg_bg_numel=fg_bg_numel,
                pred_fg_numel=fg___numel, pred_bg_numel=bg___numel,
            )

            results_parts = []
            for i, (part_numel, combination) in enumerate(zip(parts_numel, combinations)):
                # Alignment of the demeaned pred/GT values for this part, mapped
                # through the enhanced alignment function (phi + 1)^2 / 4.
                align_matrix_value = 2 * (combination[0] * combination[1]) / \
                                     (combination[0] ** 2 + combination[1] ** 2 + _EPS)
                enhanced_matrix_value = (align_matrix_value + 1) ** 2 / 4
                results_parts.append(enhanced_matrix_value * part_numel)
            enhanced_matrix_sum = sum(results_parts)

        em = enhanced_matrix_sum / (self.gt_size - 1 + _EPS)
        return em

    def cal_em_with_cumsumhistogram(self, pred: np.ndarray, gt: np.ndarray) -> np.ndarray:
        """Vectorized E-measure over all 256 thresholds via cumulative histograms.

        Index 0 of the returned array corresponds to the highest threshold
        (only pixels with value 255 predicted as foreground).
        """
        pred = (pred * 255).astype(np.uint8)
        bins = np.linspace(0, 256, 257)
        fg_fg_hist, _ = np.histogram(pred[gt], bins=bins)
        fg_bg_hist, _ = np.histogram(pred[~gt], bins=bins)
        # Cumulative counts of predicted-foreground pixels as the threshold drops.
        fg_fg_numel_w_thrs = np.cumsum(np.flip(fg_fg_hist), axis=0)
        fg_bg_numel_w_thrs = np.cumsum(np.flip(fg_bg_hist), axis=0)

        fg___numel_w_thrs = fg_fg_numel_w_thrs + fg_bg_numel_w_thrs
        bg___numel_w_thrs = self.gt_size - fg___numel_w_thrs

        if self.gt_fg_numel == 0:
            # Degenerate GT cases mirror cal_em_with_threshold, per threshold.
            enhanced_matrix_sum = bg___numel_w_thrs
        elif self.gt_fg_numel == self.gt_size:
            enhanced_matrix_sum = fg___numel_w_thrs
        else:
            parts_numel_w_thrs, combinations = self.generate_parts_numel_combinations(
                fg_fg_numel=fg_fg_numel_w_thrs, fg_bg_numel=fg_bg_numel_w_thrs,
                pred_fg_numel=fg___numel_w_thrs, pred_bg_numel=bg___numel_w_thrs,
            )

            # One row per agreement combination, one column per threshold.
            results_parts = np.empty(shape=(4, 256), dtype=np.float64)
            for i, (part_numel, combination) in enumerate(zip(parts_numel_w_thrs, combinations)):
                align_matrix_value = 2 * (combination[0] * combination[1]) / \
                                     (combination[0] ** 2 + combination[1] ** 2 + _EPS)
                enhanced_matrix_value = (align_matrix_value + 1) ** 2 / 4
                results_parts[i] = enhanced_matrix_value * part_numel
            enhanced_matrix_sum = results_parts.sum(axis=0)

        em = enhanced_matrix_sum / (self.gt_size - 1 + _EPS)
        return em

    def generate_parts_numel_combinations(self, fg_fg_numel, fg_bg_numel, pred_fg_numel, pred_bg_numel):
        """Pixel counts and demeaned (pred, gt) value pairs for the four agreement parts.

        Arguments may be scalars (single threshold) or length-256 arrays
        (threshold sweep); the arithmetic broadcasts either way.
        """
        bg_fg_numel = self.gt_fg_numel - fg_fg_numel
        bg_bg_numel = pred_bg_numel - bg_fg_numel

        parts_numel = [fg_fg_numel, fg_bg_numel, bg_fg_numel, bg_bg_numel]

        mean_pred_value = pred_fg_numel / self.gt_size
        mean_gt_value = self.gt_fg_numel / self.gt_size

        # Demeaned binary map values: 1 or 0 minus the respective map's mean.
        demeaned_pred_fg_value = 1 - mean_pred_value
        demeaned_pred_bg_value = 0 - mean_pred_value
        demeaned_gt_fg_value = 1 - mean_gt_value
        demeaned_gt_bg_value = 0 - mean_gt_value

        combinations = [
            (demeaned_pred_fg_value, demeaned_gt_fg_value),
            (demeaned_pred_fg_value, demeaned_gt_bg_value),
            (demeaned_pred_bg_value, demeaned_gt_fg_value),
            (demeaned_pred_bg_value, demeaned_gt_bg_value)
        ]
        return parts_numel, combinations

    def get_results(self) -> dict:
        """Average the adaptive score and the threshold curve over all images."""
        adaptive_em = np.mean(np.array(self.adaptive_ems, dtype=_TYPE))
        changeable_em = np.mean(np.array(self.changeable_ems, dtype=_TYPE), axis=0)
        return dict(em=dict(adp=adaptive_em, curve=changeable_em))

"""=============================== WeightedFmeasure ==============================="""

@numba.jit(nopython=True)
def _cal_wfm_continue(Dst, E, EA, gt, beta):
    """Numba-compiled tail of the weighted F-measure (MATLAB WFb reference).

    Combines the raw and neighbourhood-smoothed error maps, weights background
    errors by their distance to the foreground, then forms the weighted
    precision/recall F-score.
    """
    # Inside the GT, keep the smaller of raw (E) and smoothed (EA) errors.
    min_e_ea = np.where(gt & (EA < E), EA, E)
    # Pixel importance: background errors decay with distance to the object.
    importance = np.where(gt == 0, 2 - np.exp(np.log(0.5) / 5 * Dst), np.ones_like(gt))
    weighted_err = (min_e_ea * importance).ravel()
    gt_flat = gt.ravel()
    tp_w = np.sum(gt_flat) - np.sum(weighted_err[gt_flat == 1])
    fp_w = np.sum(weighted_err[gt_flat == 0])
    recall = 1 - np.mean(weighted_err[gt_flat == 1])
    precision = tp_w / (tp_w + fp_w + _EPS)
    # MATLAB: Q = (1+Beta^2)*(R*P)./(eps+R+(Beta.*P));
    return (1 + beta) * recall * precision / (recall + beta * precision + _EPS)

class WeightedFmeasure(object):
    """Weighted F-measure accumulated over a dataset (MATLAB WFb-style)."""

    def __init__(self, beta: float = 1):
        self.beta = beta
        self.weighted_fms = []
        # Pre-build the 7x7, sigma=5 Gaussian used to smooth the error map.
        self.matlab_style_gauss2D = self._matlab_style_gauss2D((7, 7), sigma=5)

    @staticmethod
    def _matlab_style_gauss2D(shape: tuple = (7, 7), sigma: int = 5) -> np.ndarray:
        """2D Gaussian mask equivalent to MATLAB's fspecial('gaussian', shape, sigma)."""
        m, n = [(ss - 1) / 2 for ss in shape]
        y, x = np.ogrid[-m: m + 1, -n: n + 1]
        kernel = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
        # Zero out negligible tails, exactly as fspecial does.
        kernel[kernel < np.finfo(kernel.dtype).eps * kernel.max()] = 0
        total = kernel.sum()
        if total != 0:
            kernel /= total
        return kernel

    def step(self, pred: np.ndarray, gt: np.ndarray):
        """Record the weighted F-measure of one pair; an empty GT scores 0."""
        if np.all(~gt):
            self.weighted_fms.append(0)
        else:
            self.weighted_fms.append(self.cal_wfm(pred, gt))

    def cal_wfm(self, pred: np.ndarray, gt: np.ndarray) -> float:
        """Weighted F-measure of one prediction against a non-empty boolean GT."""
        # MATLAB: [Dst,IDXT] = bwdist(dGT) — distance to / index of nearest GT pixel.
        dist, idx = bwdist(gt == 0, return_indices=True)

        # Pixel dependency: absolute error, with each background pixel's error
        # replaced by the error at its nearest foreground pixel.
        err = np.abs(pred - gt)
        err_t = np.copy(err)
        err_t[gt == 0] = err_t[idx[0][gt == 0], idx[1][gt == 0]]

        # MATLAB: K = fspecial('gaussian',7,5); EA = imfilter(Et,K);
        smoothed = convolve(err_t, weights=self.matlab_style_gauss2D, mode="constant", cval=0)
        # MATLAB: MIN_E_EA = E; MIN_E_EA(GT & EA<E) = EA(GT & EA<E);
        return _cal_wfm_continue(dist, err, smoothed, gt, self.beta)

    def get_results(self) -> dict:
        """Average weighted F-measure over all processed images."""
        return dict(wfm=np.mean(np.array(self.weighted_fms, dtype=_TYPE)))


class DICE(object):
    """Accumulate a smoothed Dice-based score over a dataset.

    NOTE(review): `cal_dice` returns 1 - dice (a distance/loss, not the Dice
    coefficient itself), and the smoothing adds 1 to the intersection as well
    as the denominator — confirm downstream consumers expect this convention.
    """

    def __init__(self):
        self.dice = []

    def step(self, pred: np.ndarray, gt: np.ndarray):
        """Record the score of one pair and also return it."""
        score = self.cal_dice(pred, gt)
        self.dice.append(score)
        return score

    def cal_dice(self, pred: np.ndarray, gt: np.ndarray):
        """Smoothed Dice distance: 1 - 2*(|P.G| + 1) / (|P| + |G| + 1)."""
        smooth = 1
        pred_vec = pred.reshape(-1)
        gt_vec = gt.reshape(-1)
        overlap = (pred_vec * gt_vec).sum()
        coeff = 2 * (overlap + smooth) / (pred_vec.sum() + gt_vec.sum() + smooth)
        return 1 - coeff

    def get_results(self):
        """Average score over all processed images."""
        return np.mean(np.array(self.dice, dtype=_TYPE))


class BinarizedF(Function):
    """Hard 0/1 binarization at 0.5 with a clipped straight-through gradient.

    Forward is non-differentiable, so backward uses a straight-through
    estimator: the upstream gradient passes where |input| <= 1 and is
    zeroed elsewhere.
    """

    @staticmethod
    def forward(ctx, input):
        # Save the input so backward can build the pass-through mask.
        ctx.save_for_backward(input)
        ones = torch.ones_like(input)
        zeros = torch.zeros_like(input)
        return torch.where(input >= 0.5, ones, zeros)

    @staticmethod
    def backward(ctx, output_grad):
        input, = ctx.saved_tensors
        # BUG FIX: the original returned the mask itself, discarding
        # `output_grad` and breaking the chain rule. A straight-through
        # estimator must scale the upstream gradient by the mask.
        pass_through = (torch.abs(input) <= 1).to(output_grad.dtype)
        return output_grad * pass_through


class BinarizedModule(nn.Module):
    """nn.Module wrapper around BinarizedF so binarization can sit in a model pipeline."""

    def __init__(self):
        super(BinarizedModule, self).__init__()
        # Store the Function class, not an instance: instantiating an
        # autograd.Function is a deprecated legacy pattern in PyTorch.
        self.BF = BinarizedF

    def forward(self, input):
        """Binarize `input` at 0.5; accepts arrays/lists/tensors.

        Uses torch.as_tensor instead of torch.Tensor(input): the latter
        misinterprets a scalar int as a size and returns an uninitialized
        tensor, while as_tensor converts the given data safely.
        """
        tensor_in = torch.as_tensor(input, dtype=torch.float32)
        return self.BF.apply(tensor_in)


class IoU(object):
    """Accumulate intersection-over-union scores over a dataset."""

    def __init__(self):
        self.iou = []
        self.n_classes = 2
        self.bin = BinarizedModule()

    def step(self, pred: np.ndarray, gt: np.ndarray):
        """Record the IoU of one pair and also return it."""
        score = self.cal_iou(pred, gt)
        self.iou.append(score)
        return score

    def _cal_iou(self, pred: np.ndarray, gt: np.ndarray):
        """Confusion-matrix based per-class IoU (not used by `step`; kept for reference)."""
        def cal_cm(y_true, y_pred):
            y_true = y_true.reshape(1, -1).squeeze()
            y_pred = y_pred.reshape(1, -1).squeeze()
            return metrics.confusion_matrix(y_true, y_pred)

        binarized = self.bin(pred)
        cm = cal_cm(binarized, gt)
        intersection = np.diag(cm)  # per-class intersection counts
        # Per-class union: row sum + column sum - diagonal.
        union = np.sum(cm, axis=1) + np.sum(cm, axis=0) - np.diag(cm)
        return intersection / union

    def cal_iou(self, pred: np.ndarray, target: np.ndarray):
        """Soft IoU: sum(p*t) / (sum(p) + sum(t) - sum(p*t))."""
        overlap = np.sum(target * pred)
        union = np.sum(target) + np.sum(pred) - overlap
        return overlap / union

    def get_results(self):
        """Average IoU over all processed images."""
        return np.mean(np.array(self.iou, dtype=_TYPE))
