import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np



class RingLoss(nn.Module):
    """Ring loss: penalizes deviation of embedding L2 norms from a target radius.

    L = alpha * mean((||emd||_2 - r)^2)
    """

    def __init__(self, alpha=0.5):
        """
        :param alpha: weight applied to the mean squared norm deviation
        """
        super(RingLoss, self).__init__()
        self.alpha = alpha

    def forward(self, emd, r):
        """
        :param emd: (N, D) embedding tensor
        :param r:   target radius, broadcastable to the (N,) norm vector
        :return: scalar loss tensor
        """
        norm = torch.norm(emd, p=2, dim=1)  # (N,) per-sample L2 norms
        # Bug fix: expand_as is NOT in-place; the original discarded its
        # result, leaving the following subtraction to rely on implicit
        # broadcasting. Keep the expanded view explicitly.
        r = r.expand_as(norm)
        diff = norm - r
        return self.alpha * torch.mean(diff * diff)


class SmoothCrossEntropy(nn.Module):
    """Cross entropy between logits and a soft (e.g. label-smoothed) target
    distribution, averaged over the batch."""

    def __init__(self):
        super(SmoothCrossEntropy, self).__init__()

    def forward(self, y_preds, y_true):
        '''
        :param y_preds: (N, C) unnormalized logits, FloatTensor
        :param y_true:  (N, C) target probability distribution, FloatTensor
        :return: scalar mean cross entropy over the batch
        '''
        # Fix: pass dim=1 explicitly. The implicit-dim form of log_softmax
        # is deprecated and emits a warning; (N, C) input normalizes over
        # the class dimension.
        logp = F.log_softmax(y_preds, dim=1)    # (N, C)
        ylogp = y_true * logp
        return -ylogp.sum() / y_true.size(0)

class SoftmaxCrossEntropy(nn.Module):
    """Cross entropy for predictions that are already probabilities
    (i.e. softmax has been applied upstream), averaged over the batch."""

    def __init__(self):
        super(SoftmaxCrossEntropy, self).__init__()

    def forward(self, y_preds, y_true):
        '''
        :param y_preds: (N, C) predicted probabilities, FloatTensor
        :param y_true:  (N, C) target probability distribution, FloatTensor
        :return: scalar mean cross entropy over the batch
        '''
        # Robustness fix: log(0) = -inf, and 0 * -inf = nan, so a single
        # zero probability silently turns the whole loss into nan. Clamp
        # probabilities away from zero before taking the log.
        logp = y_preds.clamp(min=1e-12).log()    # (N, C)
        ylogp = y_true * logp
        return -ylogp.sum() / y_true.size(0)

if __name__ == '__main__':
    # Smoke-test / demo of the tensor shapes the losses above expect.
    batch_size = 5
    nb_digits = 4

    # Random integer class labels in [0, nb_digits).
    y1 = torch.LongTensor(batch_size).random_() % nb_digits
    y2 = torch.LongTensor(batch_size).random_() % nb_digits
    # y = Variable(y)
    # Random logits of shape (batch_size, nb_digits).
    logits = torch.from_numpy(np.random.rand(batch_size, 4)).float()
    # NOTE(review): Variable is a deprecated no-op wrapper in modern torch;
    # kept for compatibility with the file's existing imports.
    logits = Variable(logits)

    x1 = torch.from_numpy(np.random.rand(batch_size, 128)).float()
    x2 = torch.from_numpy(np.random.rand(batch_size, 128)).float()
    # x = Variable(x)
    y = torch.from_numpy(np.ones((10, 128))).float()
    y_ = torch.from_numpy(np.zeros((10, 128))).float()
    # Fix: the original used a Python-2-only print statement, which is a
    # SyntaxError under Python 3. print(x) with one argument is valid in both.
    print(y.log() * y_)
