import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

from losses.SupCL import SupConLoss


def normalize_size(tensor):
    """Flatten a 2-D or 3-D tensor into the shape the loss functions expect.

    A 3-D tensor (e.g. [batch, seq, dim]) becomes [batch*seq, dim]; a 2-D
    tensor becomes 1-D. Any other rank is returned unchanged. The
    ``.contiguous()`` call guarantees the subsequent ``view`` is legal.
    """
    rank = tensor.dim()
    if rank == 3:
        return tensor.contiguous().view(-1, tensor.size(2))
    if rank == 2:
        return tensor.contiguous().view(-1)
    return tensor


def calculate_entity_loss(pred_start, pred_end, gold_start, gold_end):
    """Cross-entropy loss for entity start/end boundary prediction.

    Args:
        pred_start: logits for the start positions (flattened internally).
        pred_end: logits for the end positions (flattened internally).
        gold_start: gold start labels; -1 entries are ignored.
        gold_end: gold end labels; -1 entries are ignored.

    Returns:
        The average of the summed start-position and end-position losses.
    """
    pred_start = normalize_size(pred_start)
    pred_end = normalize_size(pred_end)
    gold_start = normalize_size(gold_start)
    gold_end = normalize_size(gold_end)

    # Class weights to counter class imbalance (positive class up-weighted).
    # Fix: allocate on the predictions' own device instead of a global
    # `torch.cuda.is_available()` lookup, which put the weights on CUDA even
    # for CPU inputs (device-mismatch error) and always targeted cuda:0 in
    # multi-GPU runs.
    weight = torch.tensor([1.0, 5.0], device=pred_start.device)

    loss_start = F.cross_entropy(pred_start, gold_start.long(), reduction='sum', weight=weight, ignore_index=-1)
    loss_end = F.cross_entropy(pred_end, gold_end.long(), reduction='sum', weight=weight, ignore_index=-1)

    # Average the start- and end-position losses.
    return 0.5 * loss_start + 0.5 * loss_end


def calculate_classification_loss(pred, gold, weight=None, ignore_index=-1):
    """Generic classification loss shared by the category, sentiment and
    intensity heads.

    Args:
        pred: raw model logits.
        gold: ground-truth labels (cast to ``long`` internally).
        weight: optional per-class weights.
        ignore_index: label value excluded from the loss (default -1).

    Returns:
        Summed cross-entropy loss over the non-ignored positions.
    """
    targets = gold.long()
    return F.cross_entropy(
        pred,
        targets,
        weight=weight,
        reduction='sum',
        ignore_index=ignore_index,
    )


def calculate_scl_loss(gold, pred_scores):
    """Supervised contrastive (SupCon) loss over positions with valid labels.

    Args:
        gold: label tensor; entries equal to -1 are treated as padding and
            dropped before the contrastive loss is computed.
        pred_scores: per-position score/logit tensor aligned with ``gold``
            along dimension 0.

    Returns:
        The SupConLoss value averaged over the retained positions.
    """
    scl = SupConLoss(contrast_mode='all', temperature=0.9)

    # Boolean-mask indexing replaces the old nonzero()+squeeze()+Python loop,
    # which crashed when exactly one label was valid: squeeze() collapsed the
    # index tensor to 0-d, and iterating a 0-d tensor raises TypeError.
    valid = gold != -1
    answers = gold[valid]
    scores = pred_scores[valid]

    scores = F.softmax(scores, dim=1)
    # SupConLoss expects [batch, n_views, dim]; we provide a single view.
    scores = scores.unsqueeze(1)

    scl_loss = scl(scores, answers)
    scl_loss /= len(scores)

    return scl_loss


class FocalLoss(nn.Module):
    """Focal loss: cross-entropy scaled by a (1 - p_t)^gamma modulating
    factor that down-weights already well-classified examples.

    Args:
        gamma: focusing parameter; 0 reduces to plain cross-entropy.
        weight: optional per-class weights passed through to ``nll_loss``.
        ignore_index: target value excluded from the loss (default -1).
    """

    def __init__(self, gamma=1, weight=None, ignore_index=-1):
        super().__init__()
        self.gamma = gamma
        self.weight = weight
        self.ignore_index = ignore_index

    def forward(self, pred, gold):
        log_probs = F.log_softmax(pred, dim=1)
        probs = log_probs.exp()
        # Modulate each log-probability by (1 - p)^gamma before the NLL step.
        modulated = (1.0 - probs).pow(self.gamma) * log_probs
        return F.nll_loss(
            modulated, gold, self.weight, ignore_index=self.ignore_index
        )
