import torch
import torch.nn.functional as F

def calculate_accuracy(logits, labels, ignore_index=-100):
    """Count correct next-token predictions for a language-model batch.

    :param logits: model outputs, shape (batch, seq_len, vocab_size)
    :param labels: gold token ids, shape (batch, seq_len)
    :param ignore_index: label value excluded from the count (padding/masked)
    :return: (n_correct, n_word) — number of correct predictions and total
             number of scored (non-ignored) tokens
    """
    # Drop the last position (it has no next-token target) and flatten to
    # (batch * (seq_len - 1), vocab_size).
    logits = logits[:, :-1, :].contiguous().view(-1, logits.size(-1))

    # Shift labels left by one so position t is scored against token t+1,
    # then flatten to one dimension.
    # BUG FIX: labels is 2-D, so the original slice `labels[:, 1:, :]`
    # raised IndexError (too many indices); use two indices only.
    labels = labels[:, 1:].contiguous().view(-1)

    # Index of the highest-scoring token at each position.
    _, predictions = logits.max(dim=-1)

    # True where the label is a real token (not padding / ignored).
    non_pad_mask = labels.ne(ignore_index)

    # Correct predictions, counted only at non-padded positions.
    n_correct = predictions.eq(labels).masked_select(non_pad_mask).sum().item()

    # Total number of scored (non-padded) tokens.
    n_word = non_pad_mask.sum().item()

    return n_correct, n_word

def calculate_loss(logit, target, ignore_index, smoothing=False):
    '''
    Compute the next-token loss between shifted logits and targets.

    :param logit: model predictions, shape (batch, seq_len, vocab_size)
    :param target: gold token ids, shape (batch, seq_len)
    :param ignore_index: label value (e.g. -100) excluded from the loss
    :param smoothing: if True, use label smoothing (eps = 0.1) instead of
                      plain cross-entropy
    :return: scalar loss tensor, averaged over non-ignored positions
    '''
    if smoothing:
        # Shift so position t predicts token t+1; flatten logits to 2-D
        # (tokens, vocab) and targets to 1-D.
        logit = logit[..., :-1, :].contiguous().view(-1, logit.size(-1))
        target = target[..., 1:].contiguous().view(-1)

        eps = 0.1
        n_class = logit.size(-1)

        # BUG FIX: build the pad mask BEFORE ignore_index values are
        # overwritten below. The original computed it afterwards, so the
        # mask was all-True and padded positions leaked into the loss.
        non_pad_mask = target.ne(ignore_index)

        # Clamp out-of-range ids (e.g. ignore_index = -100) to class 0 so
        # scatter does not fail; these positions are masked out anyway.
        valid_target_mask = (target >= 0) & (target < n_class)
        target = target.masked_fill(~valid_target_mask, 0)

        # Smoothed one-hot distribution: 1 - eps on the gold class and
        # eps spread uniformly over the remaining n_class - 1 classes.
        one_hot = torch.zeros_like(logit).scatter(1, target.view(-1, 1), 1)
        one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)

        log_prb = F.log_softmax(logit, dim=1)
        # Per-position smoothed cross-entropy, averaged over real tokens.
        loss = -(one_hot * log_prb).sum(dim=1)
        loss = loss.masked_select(non_pad_mask).mean()
    else:
        logit = logit[..., :-1, :].contiguous().view(-1, logit.size(-1))
        labels = target[..., 1:].contiguous().view(-1)
        loss = F.cross_entropy(logit, labels, ignore_index=ignore_index)
    return loss