#coding=utf8
import torch
from torch import nn
from torch.autograd import Variable
import math
import numpy as np
import torch.nn.functional as F

def sigmoid_rampup(step, stage_step):
    """Exponential ramp-up factor in (0, 1].

    Returns exp(-5 * r^2) where r is the remaining fraction of the ramp:
    ~exp(-5) at step 0, rising to 1.0 when step reaches stage_step.
    Negative steps are clamped to 0.
    """
    remaining = 1.0 - 1.0 * max(0, step) / stage_step
    return math.exp(-5.0 * remaining ** 2)




class margin_fc(nn.Module):
    """Linear classifier head that also returns a norm-product margin term.

    The margin ||x_i|| * ||w_c|| is intended to be multiplied by a one-hot
    mask of the true class when the loss is computed (see margin_logits).
    """

    def __init__(self, embedding=128, classes=4, bias=None):
        """
        :param embedding: dimensionality of the input feature vectors
        :param classes: number of output classes
        :param bias: if truthy, add a learnable per-class bias
        """
        super(margin_fc, self).__init__()
        self.num_class = classes
        # BUG FIX: was hard-coded to 128, silently ignoring the `embedding`
        # argument; now records the actual embedding size.
        self.embedding = embedding

        self.weight = nn.Parameter(torch.Tensor(classes, embedding))  # (classes, embedding)
        if bias:
            self.bias = nn.Parameter(torch.Tensor(classes))
        else:
            self.register_parameter('bias', None)

        self.weight.data.uniform_(-0.1, 0.1)
        if self.bias is not None:
            self.bias.data.uniform_(-0.1, 0.1)

    def forward(self, input):
        '''
        :param input:  size of (bs, embedding)
        :return: (logits, margin), each of size (bs, classes)
        '''
        logits = torch.matmul(input, torch.t(self.weight))  # (bs, classes)
        if self.bias is not None:
            logits += self.bias.unsqueeze(0).expand_as(logits)

        emd_norm = torch.norm(input, p=2, dim=1, keepdim=True)  # (bs, 1)
        w_norm = torch.norm(torch.t(self.weight), p=2, dim=0, keepdim=True)  # (1, classes)

        # Outer product of the norms: margin[i, c] = ||x_i|| * ||w_c||.
        margin = torch.matmul(emd_norm, w_norm)  # (bs, classes)

        # this margin should be multiplied by a onehot-mask when calculating loss
        return logits, margin

class margin_logits(nn.Module):
    """Subtracts a ramped-up margin from the true-class logit.

    new_logits[i, c] = logits[i, c] - theta * margin[i, c] for c == labels[i]
    (unchanged elsewhere), where theta ramps up from ~0 to `theta` over the
    first `stage_step` training steps.
    """

    def __init__(self, classes=4, theta=0.2, stage_step=1000, cuda=1):
        super(margin_logits, self).__init__()
        self.num_class = classes
        self.stage_step = stage_step
        self.theta = theta
        # BUG FIX: storing the flag as `self.cuda` shadowed the inherited
        # nn.Module.cuda() method, so model.cuda() would raise TypeError.
        self.use_cuda = cuda

    def forward(self, logits, margin, labels, step):
        """
        :param logits: (bs, classes) raw class scores
        :param margin: (bs, classes) margin term (e.g. from margin_fc)
        :param labels: (bs,) long tensor of true class indices
        :param step: current training step; controls the theta ramp-up
        :return: (new_logits, theta)
        """
        batch_size = labels.size(0)
        labels = labels.unsqueeze(1)  # (bs, 1) for scatter_

        onehot = torch.zeros(batch_size, self.num_class)
        if self.use_cuda:
            onehot = onehot.cuda()
        # BUG FIX: scatter_ has no `source=` keyword (TypeError); the scalar
        # form uses `value=`. Builds a one-hot mask of the true classes.
        onehot.scatter_(dim=1, index=labels, value=1)  # (bs, classes)

        if step < self.stage_step:
            theta = self.theta * sigmoid_rampup(step, self.stage_step)
        else:
            theta = self.theta

        new_logits = logits - theta * margin * onehot
        return new_logits, theta



class grouplinear_clf(nn.Module):
    """Collapses a group of per-class score vectors into a single score
    vector via a learned weighted sum over the group dimension."""

    def __init__(self, group=3, classes=4, bias=None):
        """
        :param group: number of score vectors to mix per sample
        :param classes: number of classes per score vector
        :param bias: if truthy, add a learnable per-class bias
        """
        super(grouplinear_clf, self).__init__()
        self.num_class = classes
        self.group = group

        # One learnable mixing coefficient per group member.
        self.weight = nn.Parameter(torch.Tensor(group, 1))

        if bias:
            self.bias = nn.Parameter(torch.Tensor(classes))
        else:
            self.register_parameter('bias', None)

        self.weight.data.uniform_(-0.1, 0.1)
        if self.bias is not None:
            self.bias.data.uniform_(-0.1, 0.1)

    def forward(self, input):
        '''
        :param input:  size of (bs, group, num_class)
        :return: size of (bs, num_class)
        '''
        # (bs, num_class, group) x (group, 1) -> (bs, num_class, 1)
        mixed = torch.matmul(input.permute(0, 2, 1), self.weight)
        logits = mixed.squeeze(2)  # (bs, num_class)
        if self.bias is not None:
            logits += self.bias.unsqueeze(0).expand_as(logits)
        return logits

class NoiseAdapt(nn.Module):
    """Noise-adaptation layer: maps clean-class probabilities to noisy-label
    probabilities through a learnable channel (transition) matrix.

    The channel is initialised from a confusion matrix so that
    weights[prediction, noisy_label] ~= log P(noisy_label | prediction).
    """

    def __init__(self, classes, confusion_matrix):
        """
        :param classes: number of classes
        :param confusion_matrix: (classes, classes) numpy array of counts
        """
        super(NoiseAdapt, self).__init__()
        self.weights = nn.Parameter(torch.Tensor(classes, classes))

        channel_weights = confusion_matrix.copy().astype(np.float32)
        channel_weights /= channel_weights.sum(axis=1, keepdims=True)  # row-normalise
        # perm_bias_weights[prediction,noisy_label] = log(P(noisy_label|prediction))
        # epsilon guards against log(0) for empty confusion-matrix cells.
        channel_weights = np.log(channel_weights + 1e-8)
        self.weights.data = torch.from_numpy(channel_weights).float()

    def forward(self, logits):
        """
        :param logits: (bs, classes) raw class scores
        :return: (bs, classes) probabilities over noisy labels
        """
        # BUG FIX: pass dim explicitly — implicit-dim softmax is deprecated
        # and ambiguous; both softmaxes are over the class dimension (dim=1).
        self.channel_mat = F.softmax(self.weights, dim=1)
        proba = torch.matmul(F.softmax(logits, dim=1), self.channel_mat)
        return proba



if __name__ == '__main__':
    # Smoke test for NoiseAdapt on a tiny 3x3 example.
    x = np.arange(3 * 3).reshape((3, 3))
    x = torch.from_numpy(x).float()

    # Turn each row into a probability distribution over classes.
    # FIX: pass dim explicitly (implicit-dim softmax is deprecated).
    x = F.softmax(x, dim=1)
    na = NoiseAdapt(classes=3, confusion_matrix=np.arange(3 * 3).reshape(3, 3))
    # FIX: Python-2 `print x` statement was a syntax error under Python 3.
    print(x)
    na(x)