import math

import cogdl.data
import numpy as np
import torch
import torch.nn.functional as F

from silearn import scatter_sum, scatter_cnt


def calc_ASA(label: torch.Tensor, groundtruth: torch.Tensor, xx=None):
    """Achievable Segmentation Accuracy of a superpixel labeling.

    For every superpixel, the pixels belonging to its majority ground-truth
    class are counted as correct; ASA is the correct-pixel fraction.

    :param label: (H, W) superpixel label map.
    :param groundtruth: (H, W) ground-truth label map, same shape as ``label``.
    :param xx: optional (H, W) boolean mask of pixels to exclude (e.g. void
        boundary pixels); masked pixels are dropped from the statistics.
    :return: scalar tensor, the ASA score.
    """
    assert label.shape == groundtruth.shape
    # as_tensor avoids the copy + UserWarning that torch.tensor(tensor) emits.
    label = torch.as_tensor(label).long()
    groundtruth = torch.as_tensor(groundtruth).long()
    imgwh = label.shape[0] * label.shape[1]
    # One-hot ground truth, flattened to (pixels, num_gt_classes).
    # num_classes must be a Python int, not a 0-dim tensor.
    gt = F.one_hot(groundtruth, num_classes=int(groundtruth.max()) + 1).reshape(imgwh, -1)
    label = label.reshape(-1)
    if xx is not None:
        # Drop masked pixels from both the one-hot table and the labels.
        keep = ~xx.reshape(-1).bool()
        gt = gt[keep, :]
        label = label[keep]
    # stat[s, c] = number of pixels in superpixel s with ground-truth class c.
    stat = scatter_sum(gt, label)
    label_mx = torch.max(stat, dim=1)[0]
    # NOTE(review): denominator stays imgwh even when pixels are masked out,
    # so excluded pixels count as errors — presumably intentional; confirm.
    label_acc = torch.sum(label_mx) / imgwh
    return label_acc



def get_edge(img):
    """Binary boundary map of a 2-D image.

    A pixel is marked 1 when it differs from any of its 4-neighbours:
    horizontal and vertical absolute differences are padded back to the
    input shape (on both sides, so both pixels of a differing pair are
    marked) and the accumulated response is clipped to 1.
    """
    pad = torch.nn.functional.pad
    dh = torch.abs(img[:, :-1] - img[:, 1:])  # horizontal neighbour diff
    dv = torch.abs(img[:-1, :] - img[1:, :])  # vertical neighbour diff
    acc = pad(dh, (0, 1, 0, 0))
    acc = acc + pad(dh, (1, 0, 0, 0))
    acc = acc + pad(dv, (0, 0, 0, 1))
    acc = acc + pad(dv, (0, 0, 1, 0))
    return torch.clip(acc, max=1)

def calc_BD(label: torch.Tensor, groundtruth: torch.Tensor, r=None, xx=None):
    """Boundary precision and recall with a distance tolerance.

    Boundary maps of ``label`` and ``groundtruth`` are dilated by ``r``
    pixels; a boundary pixel is matched if the other map has a boundary
    within that radius.

    :param label: (H, W) predicted label map.
    :param groundtruth: (H, W) ground-truth label map.
    :param r: matching radius in pixels; defaults to 0.25% of the image
        diagonal (rounded).
    :param xx: optional mask used *instead of* the ground-truth edge map
        when given (e.g. a precomputed void-boundary mask).
    :return: (precision, recall) scalar tensors.
    """
    # as_tensor avoids the copy + UserWarning that torch.tensor(tensor) emits.
    label = torch.as_tensor(label).float()
    groundtruth = torch.as_tensor(groundtruth).float()
    labele = get_edge(label)
    gte = get_edge(groundtruth)
    if xx is not None:
        gte = xx.float()
    if r is None:
        # Tolerance radius: 0.25% of the image diagonal.
        r = round((label.shape[0] ** 2 + label.shape[1] ** 2) ** 0.5 * 0.0025)
    kernel = torch.ones(1, 1, r * 2 + 1, r * 2 + 1, device=label.device)
    # Dilate each boundary map by r pixels via an all-ones convolution.
    expand_l = torch.clip(torch.conv2d(labele.unsqueeze(0).unsqueeze(0), kernel, padding=r), max=1)
    expand_gt = torch.clip(torch.conv2d(gte.unsqueeze(0).unsqueeze(0), kernel, padding=r), max=1)

    # recall: fraction of gt boundary pixels covered by the dilated prediction;
    # precision: fraction of predicted boundary pixels covered by the dilated gt.
    rec = 1 - torch.sum(torch.clip(gte - expand_l, min=0)) / torch.sum(gte)
    pre = 1 - torch.sum(torch.clip(labele - expand_gt, min=0)) / torch.sum(labele)
    return pre, rec



def _PRIandVI(pred, gt):
    """Rand Index and Variation of Information for two label maps.

    :param pred: (H, W) integer array of predicted labels.
    :param gt: (H, W) integer array of ground-truth labels.
    :return: (RI, VI) floats.
    """
    num1 = int(np.max(pred))
    num2 = int(np.max(gt))
    # confcounts[u, v] = number of pixels with pred == u and gt == v.
    confcounts = np.zeros((num1 + 1, num2 + 1))
    # Vectorized replacement of the original per-pixel double loop:
    # np.add.at handles repeated index pairs correctly.
    np.add.at(confcounts, (np.asarray(pred).reshape(-1), np.asarray(gt).reshape(-1)), 1)

    RI = _rand_index(confcounts)
    VI = _variation_of_information(confcounts)

    return RI, VI

def _rand_index(n):
    N=np.sum(n)
    n_u=np.sum(n,axis=1)
    n_v=np.sum(n,axis=0)

    N_choose_2=N*(N-1)/2

    ri=1-(np.sum(n_u * n_u)/2 +np.sum(n_v * n_v)/2 -np.sum(n*n))/N_choose_2

    return ri

def _log2_quotient(A, B):
    lq = np.log2((A + ((A == 0) * B) + (B == 0)) / (B + (B == 0)))
    return lq

def _variation_of_information(n):
    N=np.sum(n)

    joint = n/N

    marginal_2 = np.sum(joint,axis=0)
    marginal_1 = np.sum(joint,axis=1)

    H1=-np.sum(marginal_1 * np.log2(marginal_1 + (marginal_1 == 0)))
    H2=-np.sum(marginal_2 * np.log2(marginal_2 + (marginal_2 == 0)))

    MI=np.sum(joint * _log2_quotient(joint,np.dot(marginal_1.reshape(-1,1),marginal_2.reshape(1,-1))))

    vi=H1+H2 - 2 *MI

    return vi

def calc_pri_vi(pred: torch.Tensor, gt, void_bound=None, xx=None, scc=False):
    """Rand index and Variation of Information via a sparse confusion matrix.

    :param pred: predicted integer label map (any shape; flattened).
    :param gt: ground-truth integer label map, same number of elements.
    :param void_bound: optional label value in ``gt`` to mask out; when
        given it overrides ``xx``.
    :param xx: optional boolean mask of elements to exclude.
    :param scc: when True, additionally return the segmentation-covering
        score as the first element.
    :return: (pri, vi) or (sc, pri, vi) scalar tensors.
    """
    if void_bound is not None:
        xx = gt == void_bound

    def pri(conf, N):
        # Rand index computed directly on the sparse confusion counts.
        n_u = torch.sparse.sum(conf, dim=1)
        n_v = torch.sparse.sum(conf, dim=0)
        N_choose_2 = N * (N - 1) / 2
        ri = 1 - (torch.sparse.sum(n_u * n_u) / 2 + torch.sparse.sum(n_v * n_v) / 2
                  - torch.sparse.sum(conf * conf)) / N_choose_2
        return ri

    def sc(conf, N):
        # Segmentation covering: size-weighted best IoU per predicted segment.
        conf = conf.to_dense()
        n_u = torch.sum(conf, dim=1)  # ground-truth segment sizes
        n_v = torch.sum(conf, dim=0)  # predicted segment sizes
        cov = conf / (n_v.reshape(1, -1) + n_u.reshape(-1, 1) - conf)  # IoU per (gt, pred)
        cov = torch.max(cov, dim=0)[0]
        return torch.sum(n_v * cov / N)

    def _log2_quotient(A, B):
        # Zero-safe elementwise log2(A / B): zero cells contribute log2(1) = 0.
        lq = torch.log2((A + ((A == 0) * B) + (B == 0)) / (B + (B == 0)))
        return lq

    def vi(conf, N):
        # Variation of information: H1 + H2 - 2 * MI.
        joint = conf / N

        marginal_2 = torch.sparse.sum(joint, dim=0).to_dense()
        marginal_1 = torch.sparse.sum(joint, dim=1).to_dense()

        H1 = -torch.sum(marginal_1 * torch.log2(marginal_1 + (marginal_1 == 0)))
        H2 = -torch.sum(marginal_2 * torch.log2(marginal_2 + (marginal_2 == 0)))
        joint = joint.to_dense()

        MI = torch.sum(joint * _log2_quotient(joint, torch.mm(marginal_1.reshape(-1, 1), marginal_2.reshape(1, -1))))

        return H1 + H2 - 2 * MI

    gt = gt.long().reshape(-1)
    pred = pred.reshape(-1)
    if xx is not None:
        # Drop masked (void) elements from both label vectors.
        keep = ~xx.reshape(-1).bool()
        gt = gt[keep]
        pred = pred[keep]
    NN = pred.shape[0]

    # Build confusion counts as a sparse (num_gt_labels, num_pred_labels)
    # matrix: i2 is (gt, pixel), i1 is (pixel, pred), so i2 @ i1 accumulates
    # counts[gt, pred] over pixels.
    i1 = torch.stack((torch.arange(NN, device=pred.device, dtype=torch.int64), pred.long()))
    i2 = torch.stack((gt, torch.arange(NN, device=pred.device, dtype=torch.int64)))

    i1 = torch.sparse_coo_tensor(i1, values=torch.ones_like(i1[0], dtype=torch.float))
    i2 = torch.sparse_coo_tensor(i2, values=torch.ones_like(i2[0], dtype=torch.float))
    confcounts = torch.sparse.mm(i2, i1)
    if scc:
        return sc(confcounts, NN), pri(confcounts, NN), vi(confcounts, NN)
    return pri(confcounts, NN), vi(confcounts, NN)




def evaluate(pred, gt, void_bound=None):
    """Run every segmentation metric on a predicted / ground-truth pair.

    :param pred: (H, W) predicted superpixel label map (array-like).
    :param gt: (H, W) ground-truth label map (array-like).
    :param void_bound: optional label value in ``gt`` marking void/boundary
        pixels to mask out of the metrics.
    :return: tuple of floats
        (PRI, VI, GCE placeholder (always 1), ASA,
         boundary precision, boundary recall, number of superpixels).
    """
    dev = "cuda" if torch.cuda.is_available() else "cpu"
    # as_tensor avoids the copy + UserWarning that torch.tensor(tensor) emits.
    pred = torch.as_tensor(pred).to(dev)
    gt = torch.as_tensor(gt).to(dev)
    xx = None
    if void_bound is not None:
        # Mask void pixels and relabel gt to a contiguous 0..K-1 range.
        xx = (gt == void_bound)
        gt = torch.unique(gt, return_inverse=True)[1]
    pri, vi = calc_pri_vi(pred, gt, xx=xx)
    asa = calc_ASA(pred, gt, xx=xx)
    b1, b2 = calc_BD(pred, gt, xx=xx)
    c = pred.max() + 1  # number of predicted superpixels
    # GCE is not computed; the constant 1 keeps the return arity stable.
    return float(pri), float(vi), 1, float(asa), float(b1), float(b2), float(c)


def calc_edge(logit1, logit2, mg):
    """Negative weighted sum of pairwise logit sums.

    Broadcasts ``logit1`` along rows and ``logit2`` along columns and
    returns ``-sum_{i,j} (logit1[j] + logit2[i]) * mg[i, j]``.
    """
    pairwise = logit1.unsqueeze(0) + logit2.unsqueeze(1)
    return -(pairwise * mg).sum()


# def calc_norm_si(img, segs, r = None, t = 0.001):
#     w, es, et = pix_graph_builder.get_balanced_graph_conv(img / 255, r)
#     norm = float(torch.mean(abs(w)))
#     w /= norm * 0.5
#     w = torch.exp2(w)
#     si = si_algorithms.TwoDimSE(cogdl.data.data.Graph(edge_index = (es, et), edge_weight = w))
#     return si.calc_entropy_hierarchical(segs) / si.calc_e1()

#
# def calc_mg_full_connected(code1s, code2s, parent_code1, parent_code2):
#     code1s_sz = torch.max(code1s) + 1
#     code2s_sz = torch.max(code2s) + 1
#     tot_sz = code1s.shape[0] * code1s.shape[1]
#     hash_code = torch.zeros(code1s.shape[0], code1s.shape[1], code1s.shape[2] * code2s.shape[2], device=code1s.device, dtype=torch.int64)
#     for i in range(code1s.shape[1]):
#         for j in range(code2s.shape[2]):
#             hash_code[:, :, i * code1s_sz + j] = code1s[:, :, i] * code2s_sz + code2s[:, :, j]
#     union_mat = cluster_cnt(hash_code.reshape(-1), clip_length= code1s_sz*code2s_sz).reshape(code1s_sz, code2s_sz)
#     hash_code = None
#
#     # code expression
#     code1_sz = cluster_cnt(code1s.reshape(-1), clip_length=code1s_sz)
#     code2_sz = cluster_cnt(code2s.reshape(-1), clip_length=code2s_sz)
#
#     code1_sz[0] = code2_sz[0] = tot_sz
#     parent_code1[0] = parent_code2[0] = 0
#
#     mat_g = (-union_mat + code1_sz.reshape(code1s_sz, -1) + code2_sz.reshape(-1, code2_sz) )
#     mat_g = (tot_sz - mat_g) * union_mat
#     logit_1 = torch.log2(code1_sz / code1_sz[parent_code1])
#     logit_2 = torch.log2(code2_sz / code2_sz[parent_code2])
#     en22 = calc_edge(logit_1, logit_2, mat_g)
#
#     # position expression
#     code1_szs = code1_sz[code1s[:, :, 0]]
#     code2_szs = code2_sz[code2s[:, :, 0]]
#     logit_11 = -torch.log2(code1_szs)
#     logit_21 = -torch.log2(code2_szs)
#     logit_12 = torch.log2(code1_szs / code1_szs[parent_code1])
#     logit_22 = torch.log2(code2_szs / code2_szs[parent_code2])
#     en12 = torch.sum(-(logit_11 + logit_22) * (tot_sz - code2_szs))
#     en21 = torch.sum(-(logit_12 + logit_21) * (tot_sz - code1_szs))
#
#     en11 = torch.sum(-(code2_szs + code1_szs - union_mat[code1s[:, :, 0],code2s[:, :, 0]] - 1) * (logit_11 + logit_21))
#
#     return en11 + en12 + en21 + en22
#

#
# def calc_cross_coding_full_connected(code1s, code2s):
#     code1s_sz = torch.max(code1s) + 1
#     code2s_sz = torch.max(code2s) + 1
#     tot_sz = code1s.shape[0] * code1s.shape[1]
#     # code expression
#     hash_code = code1s * code2s_sz + code2s
#     union_mat = cluster_cnt(hash_code.reshape(-1), clip_length = code1s_sz*code2s_sz).reshape(code1s_sz, code2s_sz)
#     code1_sz = cluster_cnt(code1s.reshape(-1), clip_length=code1s_sz)
#     code2_sz = cluster_cnt(code2s.reshape(-1), clip_length=code2s_sz)
#
#
#
#
#     # position expression
#     code1_szs = code1_sz[code1s]
#     code2_szs = code2_sz[code2s]
#     logit_11 = -torch.log2(code1_szs.float())
#     logit_21 = -torch.log2(code2_szs.float())
#     logit_relative_22 = -torch.log2(union_mat[code1s, code2s])
#     logit_12 = torch.log2(code1_szs.float() / tot_sz)
#     logit_22 = torch.log2(code2_szs.float() / tot_sz)
#     en12 = torch.sum(-(logit_relative_22 + logit_22) * (tot_sz - code2_szs))
#     print(en12)
#     en21 = torch.sum(-(logit_relative_22 + logit_12) * (tot_sz - code1_szs))
#     print(en21)
#
#     en11 = torch.sum(-(logit_11 + logit_21) * (code2_szs + code1_szs - union_mat[code1s,code2s] - 1))
#     print(en11)
#
#     # en22 = torch.sum(-(logit_12 + logit_22) * (tot_sz - code2_szs - code1_szs + union_mat[code1s,code2s]))
#     # print(en22)
#     en22 = 0
#
#     en1 = torch.sum(-logit_11 * (tot_sz - 1) - logit_12 * (tot_sz - code1_szs)) #/ (tot_sz * (tot_sz-1))
#     en2 = torch.sum(-logit_21 * (tot_sz - 1) - logit_22 * (tot_sz - code2_szs)) #/ (tot_sz * (tot_sz-1))
#     print(en1)
#     print(en2)
#
#     return (en11 + en12 + en21 + en22) / (tot_sz * (tot_sz-1)), en1/ (tot_sz * (tot_sz-1)), en2/ (tot_sz * (tot_sz-1))
#
# def calc_diff_coding(code1s, code2s):
#     hxy, hx, hy = calc_cross_coding_full_connected(code1s, code2s)
#     return hxy - hx - hy


#
#
# def multiscale_si(g0, seg, M_step = 16, lglgMAX = 10):
#     seg = torch.as_tensor(seg, device = g0.device).long()
#     from model.si_algorithms import TwoDimSE
#     si = TwoDimSE(g0)
#     si.force_coding(seg)
#
#     M = 1
#     si0 = si.calc_entropy(mmm = M)  / math.log2(math.log2(M * seg.shape[0]))#/ si.calc_e1(mmm = M)
#     result = [[math.log2(M), si0]]
#     si1 = 0
#     lg_lg_t = 0
#     MAX = 2 ** lglgMAX
#     step = math.log2(MAX) / 200
#     log_M = (2** lg_lg_t - 1)
#
#     while log_M < MAX:#si1 < si0 and M < 2 ** 20:
#         si1 = si.calc_entropy(log_M=log_M) / math.log2(log_M + math.log2(seg.shape[0]))#/ si.calc_e1(mmm = M)
#         result.append([log_M, si1])
#         lg_lg_t += step
#         log_M = (2 ** lg_lg_t - 1)
#         print(result[-1])
#
#     return result
#
#
# def seed_min(g0, seg, lglgM_max = 10, seeds = 10, eps = 0.001):
#     Ms = [lglgM_max / (seeds - 1) * i for i in range(seeds)]
#
#     dev =  "cuda" if torch.cuda.is_available() else "cpu"
#     seg = torch.tensor(seg).long().to(dev)
#     from model.si_algorithms import TwoDimSE
#     si = TwoDimSE(g0.to(dev))
#     si.force_coding(seg)
#     h0, gg = si.calc_entropy(incremental=True)
#     h0, gg = float(h0), float(gg)
#     # print(h0, gg)
#     def f(lglgM):
#         log_M = (2** lglgM - 1)
#         return (h0 + gg * log_M ) / math.log2(log_M + math.log2(seg.shape[0]))  # / si.calc_e1(mmm = M)
#
#     def getans(l, r, ansl, ansr):
#         ll = l + (r - l) / 3
#         rr = l + (r - l) / 3 * 2
#         ansll = f(ll)
#         ansrr = f(rr)
#         bds = [l, ll, rr, r]
#         ff = [float(ansl), float(ansll), float(ansrr), float(ansr)]
#         idx = np.argmin(ff)
#         # print(l, r, ff)
#
#         if r - l < eps:
#             return ff[idx], bds[idx]
#         if idx < 2:
#             return getans(l, rr, ansl, ansrr)
#         else:
#             return getans(ll, r, ansll, ansr)
#
#     ffs = [f(i) for i in Ms]
#     # print(ffs)
#     min_idx = max(min(np.argmin(ffs), len(Ms) - 2), 1)
#
#     return getans(Ms[min_idx - 1], Ms[min_idx + 1], ffs[min_idx - 1], ffs[min_idx + 1])
#
