from __future__ import absolute_import

import torch
import torch.nn.functional as F
from torch import nn


def to_contiguous(tensor):
    """Return *tensor* unchanged if already contiguous, else a contiguous copy."""
    return tensor if tensor.is_contiguous() else tensor.contiguous()


def _assert_no_grad(variable):
    """Assert that *variable* does not require gradients.

    Loss targets must stay out of autograd; trips an AssertionError otherwise.
    (No-op under ``python -O``, matching plain ``assert`` semantics.)
    """
    message = ("nn criterions don't compute the gradient w.r.t. targets - please "
               "mark these variables as not requiring gradients")
    assert not variable.requires_grad, message


class SequenceCrossEntropyLoss(nn.Module):
    """Masked cross-entropy over padded token sequences.

    The per-token negative log-likelihood is summed over the valid
    (non-padded) positions of each sequence, then normalized either by the
    number of valid tokens (``sequence_normalize``) or by the batch size
    (``sample_normalize``). At most one normalization mode may be enabled.

    NOTE(review): ``weight``, ``size_average`` and ``ignore_index`` are
    stored but never used by ``forward``; kept for interface compatibility.
    """

    def __init__(self,
                 weight=None,
                 size_average=True,
                 ignore_index=-100,
                 sequence_normalize=False,
                 sample_normalize=True):
        super(SequenceCrossEntropyLoss, self).__init__()
        self.weight = weight
        self.size_average = size_average
        self.ignore_index = ignore_index
        self.sequence_normalize = sequence_normalize
        self.sample_normalize = sample_normalize
        # The two normalization modes are mutually exclusive.
        assert not (sequence_normalize and sample_normalize)

    def forward(self, input, target, length):
        """Compute the masked sequence cross-entropy.

        Args:
            input: logits of shape (batch, max_len, num_classes).
            target: integer class indices of shape (batch, pad_len),
                pad_len >= max_len.
            length: per-sample valid lengths, shape (batch,); list or tensor.

        Returns:
            Scalar loss tensor.
        """
        # Targets must not participate in autograd.
        assert not target.requires_grad, \
            "nn criterions don't compute the gradient w.r.t. targets - please " \
            "mark these variables as not requiring gradients"
        batch_size, def_max_length = target.size(0), target.size(1)
        # Vectorized validity mask: entry (i, j) is 1 iff j < length[i].
        # Replaces the original O(batch) Python loop of .fill_(1) calls.
        lengths = torch.as_tensor(length).view(-1, 1)
        positions = torch.arange(def_max_length, device=lengths.device).view(1, -1)
        mask = (positions < lengths).type_as(input)
        # Truncate targets/mask to the model's actual output length.
        max_length = int(max(length))
        assert max_length == input.size(1)
        target = target[:, :max_length]
        mask = mask[:, :max_length]
        # .contiguous() already returns self when the tensor is contiguous,
        # so the former to_contiguous() helper is redundant here.
        input = input.contiguous().view(-1, input.size(2))
        input = F.log_softmax(input, dim=1)
        target = target.contiguous().view(-1, 1)
        mask = mask.contiguous().view(-1, 1)
        # NLL at the gold class, zeroed on padded positions.
        output = - input.gather(1, target.long()) * mask
        output = torch.sum(output)
        if self.sequence_normalize:
            output = output / torch.sum(mask)
        if self.sample_normalize:
            output = output / batch_size
        return output


class LabelSmoothCrossEntropyLoss(nn.Module):
    """Masked cross-entropy with label smoothing for padded sequences.

    Each target token becomes a soft distribution: every class starts at
    ``eps / num_class`` and the gold class is then overwritten with
    ``1 - eps``.  The smoothed NLL is summed over valid positions and
    divided by the batch size.

    NOTE(review): ``ignore_index`` is stored but unused — masking is done
    via ``length`` instead.  The soft target sums to ``1 - eps/num_class``
    rather than exactly 1 (some formulations use ``eps/(num_class-1)`` for
    the non-gold classes); kept as-is to preserve numerics.
    """

    def __init__(self, eps=0.1, ignore_index=-100):
        super(LabelSmoothCrossEntropyLoss, self).__init__()
        self.eps = eps
        self.ignore_index = ignore_index

    def forward(self, input, target, length):
        """Compute the masked label-smoothed cross-entropy.

        Args:
            input: logits of shape (batch, max_len, num_classes).
            target: integer class indices of shape (batch, pad_len),
                pad_len >= max_len.
            length: per-sample valid lengths, shape (batch,); list or tensor.

        Returns:
            Scalar loss tensor.
        """
        # Targets must not participate in autograd.
        assert not target.requires_grad, \
            "nn criterions don't compute the gradient w.r.t. targets - please " \
            "mark these variables as not requiring gradients"
        assert 0 <= self.eps <= 1
        num_class = input.size(2)
        batch_size, def_max_length = target.size(0), target.size(1)
        # Vectorized validity mask: entry (i, j) is 1 iff j < length[i].
        # Replaces the original O(batch) Python loop of .fill_(1) calls.
        lengths = torch.as_tensor(length).view(-1, 1)
        positions = torch.arange(def_max_length, device=lengths.device).view(1, -1)
        mask = (positions < lengths).type_as(input)
        max_length = int(max(length))
        assert max_length == input.size(1)
        target = target[:, :max_length]
        mask = mask[:, :max_length]
        input = input.contiguous().view(-1, input.size(2))
        input = F.log_softmax(input, dim=1)
        target = target.contiguous().view(-1, 1)
        mask = mask.contiguous().view(-1)
        # Build the smoothed target in input's dtype/device so the product
        # below cannot hit a dtype/device mismatch (the original allocated
        # default-float32 on target.device).
        soft_target = torch.full((target.size(0), num_class),
                                 self.eps / num_class,
                                 dtype=input.dtype,
                                 device=input.device).scatter_(1, target, 1 - self.eps)
        # Smoothed NLL per position, zeroed on padding, averaged per sample.
        output = (-(soft_target * input).sum(-1)) * mask
        output = output.sum()
        output = output / batch_size
        return output


class CELoss(nn.Module):
    """Token-level cross-entropy over flattened sequence predictions."""

    def __init__(self, ignore_index=95):
        super(CELoss, self).__init__()
        # Positions labeled with ignore_index (the "PADDING" class) are skipped.
        self.loss_func = nn.CrossEntropyLoss(ignore_index=ignore_index)

    def forward(self, predicts, target_label):
        """Flatten (n, len, cls) logits and (n, len) labels, then apply CE.

        predicts: n * len * cls logits
        target_label: n * len integer labels
        """
        num_classes = predicts.shape[-1]
        flat_logits = predicts.view(-1, num_classes)
        flat_labels = target_label.contiguous().view(-1)
        return self.loss_func(flat_logits, flat_labels)


if __name__ == '__main__':
    # Smoke test: run the label-smoothed loss on a tiny random batch.
    criterion = LabelSmoothCrossEntropyLoss(0.1)
    logits = torch.randn(3, 5, 10)
    labels = torch.LongTensor([[1, 3, 5, 6, 7], [1, 5, 2, 9, 9], [1, 2, 9, 9, 9]])
    seq_lengths = torch.LongTensor([5, 3, 2])
    loss = criterion(logits, labels, seq_lengths)
    print(loss)
