from pytorch_loss import LogTaylorSoftmaxV3
from torch.nn import ELU
import torch.nn as nn
import torch
import torch.nn.functional as F
import torchvision

class EluTaylorSoftmax(nn.Module):
    """Drop-in replacement for a plain Softmax.

    Combines ELU with a (log) Taylor softmax: the exponential non-linearity
    is replaced by a polynomial expansion of order ``n``, while the ELU
    applied first keeps the mapping monotonic.

    Args:
        dim: dimension along which the softmax is normalized.
        n: order of the Taylor expansion passed to ``LogTaylorSoftmaxV3``.
    """

    def __init__(self, dim=1, n=2):
        super().__init__()
        self.elu = ELU()
        self.taylor_softmax = LogTaylorSoftmaxV3(dim=dim, n=n)

    def forward(self, x):
        """Apply ELU then log-Taylor-softmax.

        Usage similar to ``nn.Softmax``:
            >>> mod = EluTaylorSoftmax(dim=1, n=4)
            >>> inten = torch.randn(1, 32, 64, 64)
            >>> out = mod(inten)
        """
        activated = self.elu(x)
        return self.taylor_softmax(activated)

class EluTaylorSoftmaxLoss(nn.Module):
    """Cross-entropy-style loss built on :class:`EluTaylorSoftmax`.

    Replaces the exponential in the usual cross entropy with a polynomial
    (Taylor) expansion; the ELU applied first keeps the mapping monotonic.

    Args:
        n: order of the Taylor expansion (must be even).
        ignore_index: target value ignored by the NLL reduction.
        reduction: 'mean' | 'sum' | 'none', forwarded to ``F.nll_loss``.
    """

    def __init__(self, n=2, ignore_index=-1, reduction='mean'):
        super().__init__()
        # Odd orders would not guarantee positive outputs of the expansion.
        assert n % 2 == 0
        self.elu_taylor_softmax = EluTaylorSoftmax(dim=1, n=n)
        self.reduction = reduction
        self.ignore_index = ignore_index

    def forward(self, logits, labels):
        """Compute the loss.

        Usage similar to ``nn.CrossEntropyLoss``:
            >>> crit = EluTaylorSoftmaxLoss(n=4)
            >>> inten = torch.randn(1, 10, 64, 64)
            >>> label = torch.randint(0, 10, (1, 64, 64))
            >>> out = crit(inten, label)
        """
        log_probs = self.elu_taylor_softmax(logits)
        return F.nll_loss(
            log_probs,
            labels,
            reduction=self.reduction,
            ignore_index=self.ignore_index,
        )

def logsigsoftmax_v1(logits, dim=1):
    """Compute log(sigsoftmax) from https://arxiv.org/pdf/1805.10829.pdf.

    sigsoftmax(x) = exp(x) * sigmoid(x) / sum(exp(x) * sigmoid(x)),
    evaluated in log-space with a max-shift for numerical stability.

    Args:
        logits: input tensor.
        dim: dimension along which the distribution is normalized.

    Returns:
        Tensor of the same shape as ``logits`` holding log-probabilities,
        i.e. ``out.exp().sum(dim) == 1``.
    """
    # Subtract the per-slice max so exp() cannot overflow.
    max_values = torch.max(logits, dim, keepdim=True)[0]
    exp_logits_sigmoided = torch.exp(logits - max_values) * torch.sigmoid(logits)
    # BUG FIX: the normalizing sum must run over `dim`; it was hard-coded
    # to dim 1, which silently broke every call with dim != 1.
    sum_exp_logits_sigmoided = exp_logits_sigmoided.sum(dim, keepdim=True)
    log_probs = (logits - max_values
                 + torch.log(torch.sigmoid(logits))
                 - torch.log(sum_exp_logits_sigmoided))
    return log_probs

def logsigsoftmax_v2(logits, dim=1):
    """Compute log(sigsoftmax) via ``log_softmax``.

    Difference from v1: PyTorch's log_softmax performs the max-centering
    internally, so v1 and v2 are mathematically equivalent.

    Args:
        logits: input tensor.
        dim: dimension along which the distribution is normalized.
    """
    shifted = logits + torch.log(torch.sigmoid(logits))
    return F.log_softmax(shifted, dim=dim)

class SigSoftmaxV1(nn.Module):
    """Module wrapper around :func:`logsigsoftmax_v1` (sigmoid + softmax).

    Args:
        dim: dimension along which the distribution is normalized.
    """

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, logits):
        """Return log(sigsoftmax(logits)) along ``self.dim``."""
        return logsigsoftmax_v1(logits, dim=self.dim)


class SigSoftmaxV2(nn.Module):
    """Module wrapper around :func:`logsigsoftmax_v2` (sigmoid + softmax).

    Args:
        dim: dimension along which the distribution is normalized.
    """

    def __init__(self, dim=1):
        super().__init__()
        self.dim = dim

    def forward(self, logits):
        """Return log(sigsoftmax(logits)) along ``self.dim``."""
        return logsigsoftmax_v2(logits, dim=self.dim)

class SigSoftmaxLoss(nn.Module):
    """Cross-entropy-style loss on top of a sigsoftmax log-probability layer.

    Args:
        ver: which sigsoftmax implementation to use, 'v1' or 'v2'.
        ignore_index: target value ignored by the NLL reduction.
        reduction: 'mean' | 'sum' | 'none', forwarded to ``F.nll_loss``.

    Raises:
        NotImplementedError: if ``ver`` is neither 'v1' nor 'v2'.
    """

    def __init__(self, ver="v1", ignore_index=-1, reduction='mean'):
        super().__init__()
        self.ignore_index = ignore_index
        self.reduction = reduction
        versions = {"v1": SigSoftmaxV1, "v2": SigSoftmaxV2}
        if ver not in versions:
            raise NotImplementedError("不存在该版本{}的SigSoftmaxloss".format(ver))
        self.sigsoftmax = versions[ver](dim=1)

    def forward(self, logits, labels):
        """Compute the loss.

        Usage similar to ``nn.CrossEntropyLoss``:
            >>> crit = SigSoftmaxLoss()
            >>> inten = torch.randn(1, 10, 64, 64)
            >>> label = torch.randint(0, 10, (1, 64, 64))
            >>> out = crit(inten, label)
        """
        log_probs = self.sigsoftmax(logits)
        return F.nll_loss(
            log_probs,
            labels,
            reduction=self.reduction,
            ignore_index=self.ignore_index,
        )

if __name__ == "__main__":
    # Sanity-check script: trains two copies of the same ResNet18-based model,
    # one with SigSoftmaxV1 and one with SigSoftmaxV2 layers, and prints how
    # far their weights/losses drift apart. Requires CUDA.
    # m = EluTaylorSoftmaxLoss(n=2).cuda()
    # input = torch.randn(3,3,9).cuda()
    # out = m(input)
    # print(out)

    import numpy as np
    import torchvision
    # Deterministic cudnn so the two nets see identical kernels and any
    # divergence comes from the v1/v2 softmax implementations only.
    torch.backends.cudnn.deterministic = True
    #  tsoftmax = TaylorSoftmaxV3(dim=0, n=4)
    #  inten = torch.randn(3, 4, 5, 6).cuda()
    #  out = tsoftmax(inten)
    #  print(out.size())
    #  print(out)
    #  print(out[:, 0, 0, :4])
    #  print(out[:, 0, 0, :4].sum(dim=0))


    class Model(nn.Module):
        # ResNet18 backbone with sigsoftmax layers inserted after each stage
        # and a PixelShuffle head producing a 19-class segmentation-style map.
        def __init__(self, softmax='v1'):
            super(Model, self).__init__()
            net = torchvision.models.resnet18(pretrained=False)
            self.conv1 = net.conv1
            self.bn1 = net.bn1
            self.maxpool = net.maxpool
            self.relu = net.relu
            self.layer1 = net.layer1
            self.layer2 = net.layer2
            self.layer3 = net.layer3
            self.layer4 = net.layer4
            # 19 * 32 * 32 channels so PixelShuffle(32) yields 19 channels
            # upscaled 32x back to the input resolution.
            self.fc = nn.Conv2d(512, 19 * 32 * 32, 3, 1, 1)
            self.upsample = nn.PixelShuffle(32)
            self.softmax = softmax
            # NOTE(review): any value other than 'v1' falls into the else
            # branch and selects SigSoftmaxV2 — so Model(softmax='v3') below
            # actually runs V2. Confirm that label is intentional.
            if softmax == 'v1':
                obj = SigSoftmaxV1
                # One instance per normalization dim, applied after each stage.
                self.softmax1 = obj(dim=0)
                self.softmax2 = obj(dim=1)
                self.softmax3 = obj(dim=2)
                self.softmax4 = obj(dim=3)
            else:
                obj = SigSoftmaxV2
                self.softmax1 = obj(dim=0)
                self.softmax2 = obj(dim=1)
                self.softmax3 = obj(dim=2)
                self.softmax4 = obj(dim=3)

        def forward(self, x):
            feat = self.conv1(x)
            feat = self.bn1(feat)
            feat = self.relu(feat)
            feat = self.maxpool(feat)
            feat = self.layer1(feat)
            feat = self.softmax1(feat)
            feat = self.layer2(feat)
            #  arr = feat.cpu().detach().numpy().tofile('tmp.npy')
            #  size = feat.size()
            #  arr = np.fromfile('tmp.npy', dtype=np.float32)
            #  feat = torch.from_numpy(arr).cuda().view(size)
            feat = self.softmax2(feat)
            feat = self.layer3(feat)
            feat = self.softmax3(feat)
            feat = self.layer4(feat)
            feat = self.softmax4(feat)
            feat = self.fc(feat)
            out = self.upsample(feat)
            #  out = F.interpolate(feat, x.size()[2:], mode='bilinear', align_corners=True)
            return out

    red = 'mean'
    bs = 64
    net1 = Model(softmax='v1')
    net2 = Model(softmax='v3')
    # Start both nets from identical weights so drift is attributable to the
    # softmax implementations.
    net2.load_state_dict(net1.state_dict())
    net1.cuda()
    net2.cuda()
    net1.train()
    net2.train()

    criteria1 = SigSoftmaxLoss(ignore_index=255, reduction=red)
    criteria2 = SigSoftmaxLoss(ignore_index=255, reduction=red)
    #  criteria1 = nn.CrossEntropyLoss(ignore_index=255)
    #  criteria2 = nn.CrossEntropyLoss(ignore_index=255)

    optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
    optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)

    for it in range(5000):
        # Random inputs and labels; a few regions set to 255 exercise the
        # ignore_index path of the loss.
        inten = torch.randn(bs, 3, 224, 224).cuda()
        lbs = torch.randint(0, 19, (bs, 224, 224)).cuda()
        lbs[1, 1, 1] = 255
        lbs[30, 3, 2:200] = 255
        lbs[18, 4:7, 8:200] = 255

        #  net2.load_state_dict(net1.state_dict())
        logits1 = net1(inten)
        logits2 = net2(inten)
        #  print('logits2.size(): ', logits2.size())
        #  print('torch.isnan(logits2.sum()): ', torch.isnan(logits2).sum())
        loss1 = criteria1(logits1, lbs)
        loss2 = criteria2(logits2, lbs)

        optim1.zero_grad()
        optim2.zero_grad()
        loss1.backward()
        loss2.backward()
        optim1.step()
        optim2.step()

        #  _ = input()
        with torch.no_grad():
            # Every 50 iterations, report the max absolute weight difference
            # and the loss gap between the two nets.
            if (it+1) % 50 == 0:
            #  if True:
                print('iter: {}, ================='.format(it+1))
                print('out.weight: ', torch.max(torch.abs(net1.fc.weight - net2.fc.weight)).item())
                print('conv1.weight: ', torch.max(torch.abs(net1.conv1.weight - net2.conv1.weight)).item())
                print('\nloss: ', loss1.item() - loss2.item())
