from typing import List, Dict, Callable, Iterable
import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F

class CosineSimilarityLoss(nn.Module):
    """
    CosineSimilarityLoss expects that the input consists of two sentence embeddings
    and a float label.

    It computes the cosine similarity between the two embeddings u and v and, by
    default, minimizes the loss ||input_label - cos_score_transformation(cosine_sim(u, v) / temp)||_2.

    :param loss_fct: PyTorch loss used to compare cosine_similarity(u, v) with the
        input label. Defaults to MSE: ||input_label - cosine_sim(u, v)||_2.
    :param cos_score_transformation: Transformation applied on top of the (scaled)
        cosine similarity. Defaults to the identity function (i.e. no change).
    :param temp: Temperature the cosine similarity is divided by before the
        transformation (SimCSE-style contrastive scaling, see
        https://github.com/princeton-nlp/SimCSE/blob/511c99d4679439c582beb86a0372c04865610b6b/simcse/models.py#L35).

    Example::
        from sentence_transformers import InputExample

        train_examples = [InputExample(texts=['My first sentence', 'My second sentence'], label=0.8),
            InputExample(texts=['Another pair', 'Unrelated sentence'], label=0.3)]
        train_loss = CosineSimilarityLoss()
    """
    def __init__(self, loss_fct=None, cos_score_transformation=None, temp: float = 1.0):
        super(CosineSimilarityLoss, self).__init__()
        self.temp = temp
        # Do not use module instances as default arguments: a default is created
        # once at function-definition time and would be shared by every instance
        # of this class. Create fresh modules when the caller passes None.
        self.loss_fct = loss_fct if loss_fct is not None else nn.MSELoss()
        self.cos_score_transformation = (
            cos_score_transformation if cos_score_transformation is not None else nn.Identity()
        )

    def forward(self, sentence_embeddings: List[Tensor], labels: Tensor = None):
        """Return ``(scores, loss)``; ``loss`` is ``None`` when no labels are given.

        :param sentence_embeddings: list/tuple whose first two entries are the
            embedding tensors u and v (shape ``(batch, dim)``).
        :param labels: optional float labels, flattened to 1-D before the loss.
        """
        output = self.cos_score_transformation(
            torch.cosine_similarity(sentence_embeddings[0], sentence_embeddings[1]) / self.temp
        )
        if labels is None:
            return output, None
        return output, self.loss_fct(output, labels.view(-1))


class SoftmaxLoss(nn.Module):
    """
    This loss was used in the SBERT publication (https://arxiv.org/abs/1908.10084) to train the SentenceTransformer
    model on NLI data. It adds a softmax classifier on top of the output of two transformer networks.

    :param sentence_embedding_dimension: Dimension of your sentence embeddings
    :param num_labels: Number of different labels
    :param concatenation_sent_rep: Concatenate vectors u,v for the softmax classifier?
    :param concatenation_sent_difference: Add abs(u-v) for the softmax classifier?
    :param concatenation_sent_multiplication: Add u*v for the softmax classifier?
    :param loss_fct: Optional: Custom pytorch loss function. If not set, uses nn.CrossEntropyLoss()

    Example::

        from sentence_transformers import SentenceTransformer, SentencesDataset, losses
        from sentence_transformers.readers import InputExample

        train_examples = [InputExample(texts=['First pair, sent A', 'First pair, sent B'], label=0),
            InputExample(texts=['Second Pair, sent A', 'Second Pair, sent B'], label=3)]
        train_dataset = SentencesDataset(train_examples, model)
        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)
        train_loss = losses.SoftmaxLoss(sentence_embedding_dimension=model.get_sentence_embedding_dimension(), num_labels=train_num_labels)
    """
    def __init__(self,
                 sentence_embedding_dimension: int,
                 num_labels: int,
                 concatenation_sent_rep: bool = True,
                 concatenation_sent_difference: bool = True,
                 concatenation_sent_multiplication: bool = False,
                 loss_fct: Callable = None):
        super(SoftmaxLoss, self).__init__()
        self.num_labels = num_labels
        self.concatenation_sent_rep = concatenation_sent_rep
        self.concatenation_sent_difference = concatenation_sent_difference
        self.concatenation_sent_multiplication = concatenation_sent_multiplication

        # The classifier input is the concatenation of the selected feature
        # vectors, so its width depends on which options are enabled.
        num_vectors_concatenated = 0
        if concatenation_sent_rep:
            num_vectors_concatenated += 2
        if concatenation_sent_difference:
            num_vectors_concatenated += 1
        if concatenation_sent_multiplication:
            num_vectors_concatenated += 1
        self.classifier = nn.Linear(num_vectors_concatenated * sentence_embedding_dimension, num_labels)
        # Avoid a shared nn.CrossEntropyLoss() default-argument instance:
        # defaults are evaluated once at definition time.
        self.loss_fct = loss_fct if loss_fct is not None else nn.CrossEntropyLoss()

    def forward(self, sentence_embeddings: List[Tensor], labels: Tensor = None):
        """Return ``(logits, loss)``; ``loss`` is ``None`` when no labels are given.

        :param sentence_embeddings: list/tuple whose first two entries are the
            embedding tensors u and v (shape ``(batch, dim)``).
        :param labels: optional integer class labels, flattened to 1-D.
        """
        rep_a, rep_b = sentence_embeddings[0], sentence_embeddings[1]

        vectors_concat = []
        if self.concatenation_sent_rep:
            vectors_concat.append(rep_a)
            vectors_concat.append(rep_b)

        if self.concatenation_sent_difference:
            vectors_concat.append(torch.abs(rep_a - rep_b))

        if self.concatenation_sent_multiplication:
            vectors_concat.append(rep_a * rep_b)

        features = torch.cat(vectors_concat, dim=1)
        output = self.classifier(features)

        if labels is None:
            return output, None
        return output, self.loss_fct(output, labels.view(-1))


# Multi-class cross-entropy loss with per-class weights
class CrossEntropyLoss_class_weighted(nn.Module):
    """Thin wrapper around ``nn.CrossEntropyLoss`` that exposes per-class weights.

    :param weights: optional 1-D tensor of per-class weights passed to
        ``nn.CrossEntropyLoss``.
    :param reduction: reduction mode ('mean', 'sum', or 'none').
    :param ignore_index: target value that is ignored in the loss.
    """

    def __init__(self, weights: torch.Tensor = None, reduction: str = 'mean', ignore_index: int = -100):  # type: ignore
        super().__init__()
        self.name = 'CrossEntropyLoss_class_weighted'
        # NOTE(review): attribute name keeps the original (misspelled)
        # identifier 'cerition' so existing callers are not broken.
        self.cerition = nn.CrossEntropyLoss(weight=weights, reduction=reduction, ignore_index=ignore_index)

    def forward(self, input, targets):
        """Return the (weighted) cross-entropy of ``input`` logits vs ``targets``."""
        return self.cerition(input, targets)


class BCELoss_class_weighted(nn.Module):
    """Binary cross-entropy on probabilities with per-class (negative/positive) weights.

    Expects ``input`` to already be probabilities in [0, 1] (i.e. post-sigmoid).
    Returns the UNREDUCED element-wise loss; callers choose ``mean()`` or ``sum()``.

    References:
    https://blog.csdn.net/qq_38395570/article/details/125527130
    https://blog.csdn.net/guligedong/article/details/122358507

    :param weight: two-element weight; ``weight[0]`` scales the negative class,
        ``weight[1]`` the positive class. Defaults to [1, 1].
    :param pos_weight: optional extra multiplier on the positive term, loosely
        modeled after ``nn.BCEWithLogitsLoss`` (not a full reimplementation).
    """
    def __init__(self, weight=None, pos_weight=None):
        super().__init__()
        self.name = 'BCELoss_class_weighted'
        # Avoid a tensor default argument: it would be created once at
        # definition time and shared by every instance.
        self.weight = weight if weight is not None else torch.tensor([1, 1])
        self.pos_weight = pos_weight

    def forward(self, input, target):
        """Return the element-wise weighted BCE of ``input`` probabilities vs ``target``."""
        # Clamp away from 0 and 1 so log() stays finite.
        input = torch.clamp(input, min=1e-6, max=1 - 1e-6)

        # Compare against None explicitly: `if self.pos_weight:` raises for
        # multi-element tensors and wrongly skips the branch for pos_weight=0.
        if self.pos_weight is not None:
            bce = - self.weight[1] * target * self.pos_weight * torch.log(input) - (1 - target) * self.weight[0] * torch.log(1 - input)
        else:
            bce = - self.weight[1] * target * torch.log(input) - (1 - target) * self.weight[0] * torch.log(1 - input)
        return bce  # caller decides between torch.mean(bce) and torch.sum(bce)


class FocalLoss(nn.Module):
    def __init__(self, gamma=2, alpha=None, num_classes=3, size_average=True):
        '''
        reference: https://github.com/yatengLG/Focal-Loss-Pytorch
        Focal loss: -alpha * (1 - p_t)**gamma * ce_loss(x_i, y_i)
        Step-by-step implementation of the focal loss.
        :param alpha: class weights. When a list, it is used as per-class weights;
            when a scalar, the weights become [alpha, 1-alpha, 1-alpha, ...],
            which is commonly used to suppress the background class in object
            detection (RetinaNet uses 0.25).
        :param gamma: hard/easy-example modulation factor (RetinaNet uses 2).
        :param num_classes: number of classes.
        :param size_average: if True, return the mean loss; otherwise the sum.
        '''
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        if isinstance(alpha, list):
            # A list gives fine-grained per-class weights, size [num_classes].
            assert len(alpha) == num_classes
            self.alpha = torch.Tensor(alpha)
        elif isinstance(alpha, (int, float)) and num_classes > 1:
            # A scalar alpha down-weights the first class (background in
            # detection tasks): alpha -> [alpha, 1-alpha, 1-alpha, ...].
            assert alpha < 1
            self.alpha = torch.zeros(num_classes)
            self.alpha[0] += alpha
            self.alpha[1:] += (1 - alpha)
        else:
            self.alpha = alpha
        self.size_average = size_average

    def forward(self, input, target):
        '''
        Args:
            input: logits, shape = (B, NUM_Label)
            target: class indices, shape = (B)
        '''
        target = target.view(-1, 1)
        logpt = F.log_softmax(input, dim=-1)
        # gather implements nll_loss (cross_entropy = log_softmax + nll)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        pt = logpt.exp()

        loss = -torch.mul(torch.pow((1 - pt), self.gamma), logpt)

        if self.alpha is not None:
            # BUGFIX: the original code assigned the per-sample gathered
            # weights back to self.alpha, corrupting module state so every
            # subsequent forward() used (or indexed out of) the wrong tensor.
            # Use a local variable instead.
            alpha = self.alpha.to(target.device).gather(0, target.view(-1))
            loss = torch.mul(alpha, loss)

        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()