import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np



class RingLoss(nn.Module):
    """Ring loss: penalizes the squared gap between each embedding's
    L2 norm and a target radius ``r``, weighted by ``alpha``.
    """

    def __init__(self, alpha=0.5):
        '''
        :param alpha: loss weight applied to the mean squared norm gap
        '''
        super(RingLoss, self).__init__()
        self.alpha = alpha

    def forward(self, emd, r):
        '''
        :param emd: (N, D) FloatTensor of embeddings
        :param r: target radius; a 0-dim or broadcastable tensor
        :return: scalar, alpha * mean((||emd||_2 - r)^2)
        '''
        norm = torch.norm(emd, p=2, dim=1)   # (N,) per-sample L2 norms
        # BUG FIX: expand_as is NOT in-place; the original discarded its
        # result, leaving r unexpanded. Rebind r to the expanded view.
        r = r.expand_as(norm)

        return self.alpha * torch.mean((norm - r) * (norm - r))


class CrossEntropyWithLogSoftmax(nn.Module):
    def __init__(self):
        super(CrossEntropyWithLogSoftmax, self).__init__()


    def forward(self, logp, y_true ):
        '''
        :param logp: (N, C), Variable of FloatTensor, F.log_softmax(logits)
        :param y_true:  (N, C), Variable of FloatTensor
        :return:
        '''

        ylogp = y_true * logp
        return -ylogp.sum() / y_true.size(0)




class SoftmaxCrossEntropy(nn.Module):

    def __init__(self,ret_hard=0):
        super(SoftmaxCrossEntropy, self).__init__()
        self.ret_hard = ret_hard


    def forward(self, y_preds, y_true ):
        '''
        :param y_preds: (N, C), Variable of FloatTensor
        :param y_true:  (N, C), Variable of FloatTensor
        :return:
        '''

        logp = y_preds.log()    # (N, C)
        ylogp = y_true * logp

        if self.ret_hard:
            return ohem(-ylogp.sum(1),4).sum() / 4

        return -ylogp.sum() / y_true.size(0)



class BCELogitsLossWithMask(nn.Module):
    """Binary cross entropy on raw logits, with a per-(sample, channel)
    mask that zeroes out the contribution of masked-off channels.
    """

    def __init__(self, size_average=True):
        super(BCELogitsLossWithMask, self).__init__()
        self.size_average = size_average

    def forward(self, input, target, mask):
        '''
        :param input: Variable of shape (N, C, H, W)  logits
        :param target:  Variable of shape (N, C, H, W)  0~1 float
        :param mask: Variable of shape (N, C)  0. or 1.  float
        :return: masked BCE loss; averaged by (mask.sum() + 1) when
            size_average is set, otherwise the raw sum
        '''
        if target.size() != input.size():
            raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))

        # Numerically stable BCE-with-logits via the log-sum-exp trick.
        neg_clamped = (-input).clamp(min=0)
        elementwise = (input - input * target + neg_clamped
                       + ((-neg_clamped).exp() + (-input - neg_clamped).exp()).log())

        # Broadcast the (N, C) mask over the spatial dimensions.
        channel_mask = mask.unsqueeze(2).unsqueeze(3).expand_as(input)
        masked = elementwise * channel_mask

        total = masked.sum()
        if self.size_average:
            # +1 guards against division by zero for an all-zero mask.
            return total / (mask.sum() + 1)
        return total



def ohem(loss, top_k):
    '''
    Online hard example mining: keep the top_k largest per-sample losses.

    :param loss: variable of size (bs,), one loss value per sample
    :param top_k: int, number of hardest samples to keep; clamped to
        [1, bs] so an over-large or non-positive value cannot crash topk
    :return: variable of size (k,), the selected losses (largest first)
    '''
    # Clamp k into a valid range: topk raises if k exceeds loss.size(0),
    # and the original max(top_k, 1) already guarded the lower bound.
    k = min(max(int(top_k), 1), loss.size(0))
    _, idx = loss.topk(k, 0, True, True)
    return loss.index_select(0, idx)



if __name__ == '__main__':
    # Quick manual sanity check of the index_select pattern used by ohem().
    # BUG FIX: the original used Python-2 print statements, which are a
    # SyntaxError under Python 3; converted to print() calls.
    a = torch.LongTensor([2, 1, 3])
    b = torch.arange(0, 5) * 3
    print(a, b)
    print(b.index_select(0, a))