

import torch
import torch.nn as nn

from config import parser
def accuracy(output, target, topk=(1,)):
    """Compute the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of scores/logits.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to report precision for.

    Returns:
        List of 0-dim tensors, one per k, each the top-k accuracy in percent.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    # Indices of the top-maxk predictions per sample, transposed to
    # (maxk, batch) so row i holds every sample's (i+1)-th best guess.
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape() works on non-contiguous tensors (e.g. after t(), or when
        # the tensor is scattered across GPUs in multi-GPU training), so the
        # previous explicit contiguous().view(-1) workaround is unnecessary.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k / batch_size * 100.0)

    return res



'''
Custom cross-entropy loss (multi-class) with label smoothing.
'''


class myLoss(nn.Module):
    """Multi-class cross-entropy loss with label smoothing.

    Expects the target ``y`` to be a one-hot (or soft) distribution with the
    same shape as the logits ``x``. The smoothing factor epsilon is read from
    the ``label_smooth`` command-line argument at construction time.
    """

    def __init__(self):
        super(myLoss, self).__init__()
        self.args = parser.parse_args()
        # Smoothing factor epsilon, expected in [0, 1).
        self.smooth = self.args.label_smooth

    def forward(self, x, y):
        """Return the smoothed cross-entropy averaged over the batch.

        Args:
            x: (batch, num_classes) raw logits.
            y: (batch, num_classes) target distribution (typically one-hot).
        """
        x = torch.log_softmax(x, dim=1)
        # Label smoothing spreads epsilon uniformly over the classes:
        #   y_smooth = y * (1 - eps) + eps / num_classes
        # BUG FIX: the uniform term must be divided by the number of classes
        # x.size(1), not the batch size x.size(0) as before — the two only
        # coincide when batch happens to equal the class count.
        y = y * (1 - self.smooth) + self.smooth / x.size(1)
        loss = -torch.sum(y.mul(x)) / x.size(0)

        return loss
