import numpy as np
from medpy import metric
from torch.nn import BCEWithLogitsLoss, MSELoss,CrossEntropyLoss
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
class Loss(object):
    """Collection of segmentation losses.

    Provides plain pixel-wise losses (cross-entropy, MSE, soft dice) and a
    "one ring per class" supervised contrastive loss driven by a signed
    distance map (SDM): pixels at the same signed distance from the object
    boundary are treated as positives of each other.

    NOTE(review): the contrastive losses call ``.cuda()`` internally and
    therefore require a CUDA device.
    """

    def __init__(self):
        pass

    def crossEntropy_loss(self, img_pred, img_gt):
        """Cross-entropy between logits ``img_pred`` and class-index targets ``img_gt``."""
        return CrossEntropyLoss()(img_pred, img_gt)

    def mse_loss(self, img_pred, img_gt):
        """Mean squared error between ``img_pred`` and ``img_gt``."""
        return MSELoss()(img_pred, img_gt)

    def dice_loss(self, score, target):
        """Soft dice loss: ``1 - (2*|S.T| + eps) / (|S|^2 + |T|^2 + eps)``.

        ``smooth`` keeps the ratio defined when both masks are empty.
        """
        target = target.float()
        smooth = 1e-5
        intersect = torch.sum(score * target)
        y_sum = torch.sum(target * target)
        z_sum = torch.sum(score * score)
        return 1 - (2 * intersect + smooth) / (z_sum + y_sum + smooth)

    def contras_loss_bylabel(self):
        # TODO: contrastive loss grouping pixels by segmentation label
        # (not implemented yet).
        pass

    def _contras_loss_core(self, sdm_gts, seg_contra, temperature, max_dist):
        """Shared implementation of the ring-per-class contrastive loss.

        Args:
            sdm_gts: per-pixel signed distance to the boundary; assumed
                shape [B, H, W] per the original comments — TODO confirm.
            seg_contra: per-pixel (normalized) feature embeddings; assumed
                shape [B, C, H, W] — TODO confirm.
            temperature: softmax temperature dividing the dot-product logits.
            max_dist: only distances d with ``-max_dist <= d <= max_dist``
                form rings (classes).

        Returns:
            Scalar tensor: mean supervised-contrastive loss over all
            sampled pixels.
        """
        # Each unique signed distance inside the band is one "ring" (class).
        sdm_unique = sdm_gts.unique()
        in_band = (sdm_unique <= max_dist) & (sdm_unique >= -max_dist)
        boundary_val = torch.sort(sdm_unique[in_band])[0]

        batchsize = sdm_gts.shape[0]

        # Collect, per ring, the feature vector of every pixel at that distance.
        sameclass = [[] for _ in range(len(boundary_val))]
        for i in range(batchsize):
            for bi in range(len(boundary_val)):
                x_axis, y_axis = torch.where(sdm_gts[i] == boundary_val[bi])
                for j, k in zip(x_axis, y_axis):
                    sameclass[bi].append(seg_contra[i, :, j, k].cpu().detach().numpy())

        # Sample the same number of pixels from every ring: the smallest ring
        # size, capped at 16 pixels per image (batchsize * 16 per batch).
        pixelsNum = min(len(ring) for ring in sameclass)
        pixelsNum = min(pixelsNum, batchsize * 16)

        dis_class_num = len(sameclass)

        # Randomly sample pixelsNum features from each ring.
        dis_feature = [[] for _ in range(dis_class_num)]
        for i in range(dis_class_num):
            order = list(range(len(sameclass[i])))
            np.random.shuffle(order)  # in-place shuffle of candidate indices
            for j in range(pixelsNum):
                dis_feature[i].append(sameclass[i][order[j]])

        # [rings, pixelsNum, channels]
        dis_feature = torch.tensor(np.array(dis_feature)).float().cuda()

        kernels = dis_feature.unsqueeze(0)              # [1, rings, pix, c]
        contrast_feature = kernels.permute(0, 3, 1, 2)  # [1, c, rings, pix]
        # One 1x1 "kernel" per sampled pixel.
        kernels = kernels.reshape(-1, contrast_feature.shape[1], 1, 1)

        # conv2d with 1x1 kernels computes all-pairs dot products in one shot;
        # divide by the temperature to get the contrastive logits.
        logits = torch.div(F.conv2d(contrast_feature, kernels), temperature)
        logits = logits.permute(1, 0, 2, 3)
        # Row i = dot products of pixel i with every sampled pixel.
        logits = logits.reshape(logits.shape[0], -1)    # [N, N], N = rings*pix

        # mask: 1 where two pixels belong to the same ring (positive pairs).
        mask = torch.ones((pixelsNum, pixelsNum)).cuda()
        mask = self.generateBlockOneDiagonal(mask, dis_class_num)
        # logits_mask: 1 everywhere except the diagonal, to exclude each
        # pixel's similarity with itself (exp(self-dot) would dominate).
        logits_mask = torch.scatter(
            torch.ones_like(mask),
            1,
            torch.arange(mask.shape[0]).view(-1, 1).cuda(),
            0,
        )
        mask = logits_mask * mask  # drop self-pairs from the positive set
        exp_logits = torch.exp(logits) * logits_mask

        # log-probability of each pair, normalized over all non-self pairs.
        log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
        # Average over each pixel's positives (same-ring neighbours).
        mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)

        return (-mean_log_prob_pos).mean()

    def contras_loss_mean(self, sdm_gts, seg_contra, is_label=True, temperature=0.07):
        """Ring-per-class contrastive loss over the +/-2 boundary band.

        Returns a scalar loss when ``is_label is True``; ``None`` otherwise
        (the unlabeled branch is not implemented).
        """
        if is_label is True:
            return self._contras_loss_core(sdm_gts, seg_contra, temperature, max_dist=2)
        return None

    def contras_loss_mean_3layer(self, sdm_gts, seg_contra, is_label=True, temperature=0.07):
        """Same as :meth:`contras_loss_mean` (band +/-2).

        Fixed: the previous version contained leftover debug ``print``/
        ``exit()`` calls that terminated the process on first use.
        """
        if is_label is True:
            return self._contras_loss_core(sdm_gts, seg_contra, temperature, max_dist=2)
        return None

    def contras_loss_mean_1layer(self, sdm_gts, seg_contra, is_label=True, temperature=0.07):
        """Ring-per-class contrastive loss restricted to the +/-1 band."""
        if is_label is True:
            return self._contras_loss_core(sdm_gts, seg_contra, temperature, max_dist=1)
        return None

    def generateBlockOneDiagonal(self, block, block_num):
        """Tile ``block`` ``block_num`` times along the diagonal.

        Given a [k, k] ``block``, returns a [k*n, k*n] tensor with ``block``
        repeated on the diagonal and zeros elsewhere (built via numpy on the
        CPU, then moved back to CUDA; note the zeros promote the result to
        float64, matching the original behavior).
        """
        block = block.cpu().numpy()
        lena = len(block)
        n = block_num
        lis = [None] * n
        for i in range(n):
            # Column-block i: zeros above, `block` at row-block i, zeros below.
            lis[i] = np.row_stack((np.zeros((i * lena, lena)), block, np.zeros(((n - i - 1) * lena, lena))))

        res = np.hstack(lis)

        return torch.tensor(res).cuda()

class DiceLossMulti(nn.Module):
    """Multi-class soft dice loss.

    ``target`` is a class-index map with a singleton channel dimension
    (presumably [B, 1, H, W] — confirmed only by the dim=1 concat below);
    it is one-hot encoded to match ``inputs`` ([B, n_classes, H, W]) and
    the per-class soft dice losses are averaged.
    """

    def __init__(self, n_classes):
        super(DiceLossMulti, self).__init__()
        self.n_classes = n_classes  # number of segmentation classes

    def _one_hot_encoder(self, input_tensor):
        """One-hot encode a class-index tensor along dim 1 as floats."""
        tensor_list = []
        for i in range(self.n_classes):
            # Boolean map of pixels belonging to class i (equivalent to the
            # former `input_tensor == i * torch.ones_like(input_tensor)`,
            # without allocating a throwaway tensor per class).
            temp_prob = input_tensor == i
            tensor_list.append(temp_prob)
        output_tensor = torch.cat(tensor_list, dim=1)
        return output_tensor.float()

    def _dice_loss(self, score, target):
        """Soft dice loss for one class: ``1 - (2*|S.T|+eps)/(|S|^2+|T|^2+eps)``."""
        target = target.float()
        smooth = 1e-5  # keeps the ratio defined on empty masks
        intersect = torch.sum(score * target)
        y_sum = torch.sum(target * target)
        z_sum = torch.sum(score * score)
        return 1 - (2 * intersect + smooth) / (z_sum + y_sum + smooth)

    def forward(self, inputs, target, weight=None, softmax=False):
        """Compute the class-averaged soft dice loss.

        Args:
            inputs: predictions, [B, n_classes, H, W]; logits if
                ``softmax=True``, otherwise already-normalized scores.
            target: class-index map, one-hot encoded internally.
            weight: optional per-class weights (defaults to all ones).
            softmax: apply softmax over dim 1 to ``inputs`` first.

        Raises:
            AssertionError: if the one-hot target shape does not match
                ``inputs``.
        """
        if softmax:
            inputs = torch.softmax(inputs, dim=1)
        target = self._one_hot_encoder(target)
        if weight is None:
            weight = [1] * self.n_classes
        assert inputs.size() == target.size(), 'predict & target shape do not match'
        loss = 0.0
        for i in range(0, self.n_classes):
            # NOTE(review): the sum is divided by n_classes rather than
            # sum(weight), so non-default weights rescale the loss.
            loss += self._dice_loss(inputs[:, i], target[:, i]) * weight[i]
        return loss / self.n_classes