import pdb

import torch
from torch import nn
from torch.nn import functional as F
class MMCL(nn.Module):
    """Memory-based multi-label classification loss with hard-negative mining.

    ``inputs`` is an (m, n) matrix of similarity scores between the m batch
    samples and the n entries of a feature memory bank.  ``targets`` is either
    a 1-D tensor of memory indices, or (``is_vec=True``) an (m, n) multi-label
    matrix.  The loss pulls positive scores toward +1 and pushes the hardest
    ``r``-fraction of negative scores toward -1 with squared error, optionally
    adding an online-example-mining (OEM) cross-entropy term.
    """

    def __init__(self, delta=5.0, r=0.001, cfg=None):
        """
        Args:
            delta: weight of the positive (pull-to-+1) term relative to the
                hard-negative (push-to--1) term.
            r: fraction of negatives mined as "hard" negatives per sample.
            cfg: config object; only ``cfg.DEVICE`` and ``cfg.USE_OEM`` are read.
        """
        super(MMCL, self).__init__()
        self.device = cfg.DEVICE
        self.delta = delta  # coefficient for mmcl
        print('delta:', self.delta)
        self.r = r          # hard negative mining ratio
        self.use_oem = cfg.USE_OEM

    def _to_multilabel(self, inputs, targets, is_vec):
        """Return an (m, n) label matrix: one-hot from index targets, or the
        given multi-label matrix unchanged when ``is_vec`` is True."""
        if is_vec:
            return targets
        idx = torch.unsqueeze(targets, 1)
        multilabel = torch.zeros(inputs.size()).to(self.device)
        multilabel.scatter_(1, idx, float(1))
        return multilabel

    def _hard_negatives(self, neg_logit):
        """Select the top ``r``-fraction highest-scoring negatives.

        FIX: the original ``int(self.r * N)`` could be 0 for small memory
        banks (r * N < 1), leaving an empty selection whose mean is NaN; we
        now keep at least one hard negative whenever any negatives exist.
        ``topk`` replaces the original sort + boolean mask (same selection,
        and ``mean`` is order-invariant).
        """
        num = min(max(1, int(self.r * neg_logit.size(0))), neg_logit.size(0))
        if num == 0:  # no negatives at all; nothing to select
            return neg_logit
        return torch.topk(neg_logit, num, largest=True).values

    def forward(self, inputs, targets, is_vec=False, mem=None, input_feats=None):
        """Squared-error loss on positives and mined hard negatives, plus an
        optional per-sample OEM cross-entropy term (enabled by ``cfg.USE_OEM``).

        Returns a scalar tensor: the mean per-sample loss.
        """
        m, n = inputs.size()
        multilabel = self._to_multilabel(inputs, targets, is_vec)
        # Index of each sample's own (strongest) positive in the memory bank.
        # FIX: the original took torch.max over ``targets``, which for index
        # targets (shape (m, 1) after unsqueeze) is always position 0; the
        # argmax of the label matrix is correct for both input formats.
        self_index = torch.argmax(multilabel, dim=1)

        loss = []
        for i in range(m):
            logit = inputs[i]      # similarity of sample i to every memory entry
            label = multilabel[i]  # its multi-label vector
            # Split scores into positives and (unlabeled) negatives.
            pos_logit = torch.masked_select(logit, label > 0.5)
            neg_logit = torch.masked_select(logit, label < 0.5)
            hard_neg_logit = self._hard_negatives(neg_logit)

            # Squared error: pull positives toward +1, push hard negatives
            # toward -1.  (Original wrote (-1 - x)^2 here; identical to the
            # (1 + x)^2 form used by the sibling methods.)
            l = self.delta * torch.mean((1 - pos_logit).pow(2)) + torch.mean(
                (1 + hard_neg_logit).pow(2))

            if self.use_oem:  # OEM: classify the sample against its negatives
                self_pos = inputs[i][self_index[i]].unsqueeze(0)
                neg_with_self = torch.cat((neg_logit, self_pos), 0)
                # cross-entropy target is the position of the sample itself
                targ = torch.tensor(len(neg_with_self) - 1).to(self.device)
                loss_oem = F.cross_entropy(neg_with_self.unsqueeze(0), targ.unsqueeze(0))
                l = l + loss_oem

            loss.append(l)

        return torch.mean(torch.stack(loss))

    def forward_swich(self, inputs, targets, is_vec=False, mem=None, input_feats=None):
        """Experimental variant of :meth:`forward` with hard-coded switches:
        a center-distance loss (off), a per-sample OEM term (off) and a
        batch-level OEM cross-entropy over the full score matrix (on)."""
        m, n = inputs.size()
        multilabel = self._to_multilabel(inputs, targets, is_vec)

        use_ps_pos_neg = False
        if not use_ps_pos_neg:
            loss = []
            use_cenloss = False
            use_oem = True
            for i in range(m):
                logit = inputs[i]      # similarity of sample i to every memory entry
                label = multilabel[i]  # its multi-label vector
                pos_logit = torch.masked_select(logit, label > 0.5)
                neg_logit = torch.masked_select(logit, label < 0.5)
                hard_neg_logit = self._hard_negatives(neg_logit)

                if use_cenloss:
                    # Euclidean distance between the sample feature and the
                    # center of its positive memory vectors (assumes ``mem``
                    # is the (n, d) memory bank and ``input_feats`` the batch
                    # features — TODO confirm against caller).
                    p_inds = label > 0.5
                    pos_center = torch.mean(mem[p_inds], 0)
                    feat = input_feats[i]
                    dd_loss = torch.sqrt(torch.pow(feat - pos_center, 2).sum())
                    l = self.delta * dd_loss + torch.mean(
                        (1 + hard_neg_logit).pow(2))
                else:
                    l = self.delta * torch.mean((1 - pos_logit).pow(2)) + torch.mean(
                        (1 + hard_neg_logit).pow(2))

                    use_pos_oem = False
                    if use_pos_oem:
                        # FIX: argmax over the label matrix, not the raw
                        # targets (always 0 for unsqueezed index targets).
                        lab = torch.argmax(multilabel, dim=1)[i]
                        self_pos = inputs[i][lab].unsqueeze(0)
                        neg_with_self = torch.cat((neg_logit, self_pos), 0)
                        # cross-entropy target is the sample's own position
                        targ = torch.tensor(len(neg_with_self) - 1).to(self.device)
                        loss_oem = F.cross_entropy(neg_with_self.unsqueeze(0), targ.unsqueeze(0))
                        l = l + loss_oem

                loss.append(l)

            if use_oem:
                # Batch-level OEM: classify each sample to its own memory slot.
                # FIX: argmax over the label matrix (see above).
                lab = torch.argmax(multilabel, dim=1)
                loss_oem = F.cross_entropy(inputs, lab)
                loss = torch.mean(torch.stack(loss)) + loss_oem
            else:
                loss = torch.mean(torch.stack(loss))

        else:
            # Pre-assigned labels: 1 = positive, -1 = hard negative, 0 = ignored
            # (requires ``is_vec=True`` with a {-1, 0, 1} label matrix).
            loss = []
            for i in range(m):
                logit = inputs[i]
                label = multilabel[i]
                pos_logit = torch.masked_select(logit, label == 1)
                hard_neg_logit = torch.masked_select(logit, label == -1)

                l = self.delta * torch.mean((1 - pos_logit).pow(2)) + torch.mean(
                    (1 + hard_neg_logit).pow(2))
                loss.append(l)
            loss = torch.mean(torch.stack(loss))

        return loss

    def forward_soft_mul(self, inputs, targets, is_vec=False, mem=None, input_feats=None):
        """Variant of :meth:`forward` that always adds a batch-level OEM
        cross-entropy over the full score matrix (soft multi-label targets
        are thresholded at 0.5 for the positive/negative split)."""
        m, n = inputs.size()
        multilabel = self._to_multilabel(inputs, targets, is_vec)

        loss = []
        for i in range(m):
            logit = inputs[i]      # similarity of sample i to every memory entry
            label = multilabel[i]  # its multi-label vector
            pos_logit = torch.masked_select(logit, label > 0.5)
            neg_logit = torch.masked_select(logit, label < 0.5)
            hard_neg_logit = self._hard_negatives(neg_logit)

            # Pull positives toward +1, push hard negatives toward -1.
            l = self.delta * torch.mean((1 - pos_logit).pow(2)) + torch.mean(
                (1 + hard_neg_logit).pow(2))

            loss.append(l)
        use_oem = True
        if use_oem:
            # Batch-level OEM: classify each sample to its own memory slot.
            # FIX: argmax over the label matrix, not the raw targets (always
            # 0 for unsqueezed index targets).
            lab = torch.argmax(multilabel, dim=1)
            loss_oem = F.cross_entropy(inputs, lab)
            loss = torch.mean(torch.stack(loss)) + loss_oem
        else:
            loss = torch.mean(torch.stack(loss))

        return loss

