import math
from torch import nn
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np

class cat_linear(nn.Module):
    """Multi-head linear classifier.

    Applies one independent ``nn.Linear`` head per entry of
    ``out_features_list`` to the same input and concatenates the
    per-head softmax probabilities along dim 1.
    """

    def __init__(self, in_features, out_features_list, bias=True):
        super(cat_linear, self).__init__()
        self.in_features = in_features
        self.out_features_list = out_features_list
        for i, out_features in enumerate(out_features_list):
            self.add_module('fc%d' % i, nn.Linear(in_features, out_features, bias))

    def forward(self, x):
        """x: (bs, in_features) -> (bs, sum(out_features_list)) of per-head probabilities."""
        x_cat = []
        for i, _ in enumerate(self.out_features_list):
            # dim=1 made explicit: the implicit-dim form of F.softmax is
            # deprecated and warns/errors on modern PyTorch (it picked dim=1
            # for 2-D input, so behavior is unchanged).
            x_cat.append(F.softmax(getattr(self, 'fc%d' % i)(x), dim=1))
        x_cat = torch.cat(x_cat, 1)

        return x_cat




class cat_linear2(nn.Module):
    """Multi-head two-layer classifier.

    Each head is Linear(in_features -> emd) -> ReLU -> Linear(emd -> out_features)
    -> softmax; the per-head probability vectors are concatenated along dim 1.
    """

    def __init__(self, in_features, emd, out_features_list, bias=True):
        super(cat_linear2, self).__init__()
        self.in_features = in_features
        self.out_features_list = out_features_list
        for i, out_features in enumerate(out_features_list):
            self.add_module('fc1%d' % i, nn.Linear(in_features, emd, bias))
            self.add_module('fc2%d' % i, nn.Linear(emd, out_features, bias))

    def forward(self, x):
        """x: (bs, in_features) -> (bs, sum(out_features_list)) of per-head probabilities."""
        x_cat = []
        for i, _ in enumerate(self.out_features_list):
            x_split = F.relu(getattr(self, 'fc1%d' % i)(x))
            # Explicit dim=1: the implicit-dim F.softmax is deprecated
            # (it resolved to dim=1 for 2-D input, so behavior is unchanged).
            x_cat.append(F.softmax(getattr(self, 'fc2%d' % i)(x_split), dim=1))
        x_cat = torch.cat(x_cat, 1)

        return x_cat


class SPP_layer(nn.Module):
    """Spatial pyramid pooling.

    Pools the input feature map to each grid size in ``out_sizes`` (using
    average or max pooling), flattens every pooled map, and concatenates
    the results along the last dimension.
    """

    def __init__(self, out_sizes=(), pooling='avg'):
        super(SPP_layer, self).__init__()
        self.out_sizes = out_sizes
        if pooling == 'avg':
            self.pool = F.avg_pool2d
        elif pooling == 'max':
            self.pool = F.max_pool2d
        else:
            raise ValueError('unknown pooling type')

    def forward(self, x):
        bs, _, h, w = x.size()
        # One flattened pooled map per requested grid size; kernel and
        # stride coincide (floor division of the spatial extent).
        pooled = [
            self.pool(
                x,
                kernel_size=(h // size, w // size),
                stride=(h // size, w // size),
            ).view(bs, -1)
            for size in self.out_sizes
        ]
        return torch.cat(pooled, -1)

class CrossStitch(nn.Module):
    """Cross-stitch unit mixing two task streams.

    Returns (xa*waa + xb*wba, xa*wab + xb*wbb) where the four weights are
    learnable, either one scalar per channel ('channel_wise') or a single
    scalar for the whole layer ('layer_wise').  Initialized to keep 0.9 of
    each stream's own features and leak 0.1 across streams.
    """

    def __init__(self, in_channels, mode='channel_wise'):
        super(CrossStitch, self).__init__()
        self.mode = mode
        if self.mode == 'channel_wise':
            n = in_channels
        elif self.mode == 'layer_wise':
            n = 1
        else:
            # Previously an unknown mode fell through silently and crashed
            # later with a confusing AttributeError on weight_aa; fail fast.
            raise ValueError("unknown mode: %r (expected 'channel_wise' or 'layer_wise')" % (mode,))

        # torch.full replaces the numpy round-trip; default dtype is float32,
        # matching the original .float() conversion.
        self.weight_aa = nn.Parameter(torch.full((n,), 0.9))
        self.weight_ab = nn.Parameter(torch.full((n,), 0.1))
        self.weight_ba = nn.Parameter(torch.full((n,), 0.1))
        self.weight_bb = nn.Parameter(torch.full((n,), 0.9))

    def _expand(self, w, ref):
        # Broadcast a per-channel (or scalar) weight vector across the batch
        # and any trailing spatial dimensions of ref.
        dims = len(ref.size())
        if dims == 2:
            return w.unsqueeze(0).expand_as(ref)
        if dims == 3:
            return w.unsqueeze(0).unsqueeze(2).expand_as(ref)
        if dims == 4:
            return w.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(ref)
        raise ValueError('dims smaller than 2 or bigger than 4 is not considered')

    def forward(self, xa, xb):
        waa = self._expand(self.weight_aa, xa)
        wab = self._expand(self.weight_ab, xa)
        wba = self._expand(self.weight_ba, xa)
        wbb = self._expand(self.weight_bb, xa)
        return xa * waa + xb * wba, xa * wab + xb * wbb


class CrossSELayer(nn.Module):
    """Cross squeeze-and-excitation: gate computed from xa, applied to xb.

    Global-average-pools xa, passes it through a bottleneck MLP ending in
    a sigmoid, and scales xb channel-wise by the resulting gate.
    """

    def __init__(self, channel, reduction=16):
        super(CrossSELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
                nn.Linear(channel, channel // reduction),
                nn.ReLU(inplace=True),
                nn.Linear(channel // reduction, channel),
                nn.Sigmoid()
        )

    def forward(self, xa, xb):
        batch, channels = xa.size(0), xa.size(1)
        squeezed = self.avg_pool(xa).view(batch, channels)
        gate = self.fc(squeezed).view(batch, channels, 1, 1)
        return xb * gate

class cat_conv_linear(nn.Module):
    """Multi-head 1x1-conv classifier over feature maps.

    One 1x1 Conv2d head per entry of ``out_features_list``; each head's
    output is softmaxed over the channel dimension and the heads are
    concatenated along dim 1, preserving the spatial extent.
    """

    def __init__(self, in_features, out_features_list, bias=True):
        super(cat_conv_linear, self).__init__()
        self.in_features = in_features
        self.out_features_list = out_features_list
        for i, out_features in enumerate(out_features_list):
            self.add_module('fc%d' % i, nn.Conv2d(in_features, out_features, kernel_size=1, stride=1, padding=0, bias=bias))

    def forward(self, x):
        """x: (bs, in_features, H, W) -> (bs, sum(out_features_list), H, W)."""
        x_cat = []
        for i, _ in enumerate(self.out_features_list):
            # dim=1 (channels) made explicit: implicit-dim F.softmax is
            # deprecated; it resolved to dim=1 for 4-D input, so the
            # behavior is unchanged.
            x_cat.append(F.softmax(getattr(self, 'fc%d' % i)(x), dim=1))
        x_cat = torch.cat(x_cat, 1)

        return x_cat


class cat_conv_linear_sp(nn.Module):
    """Spatially-masked multi-head classifier.

    For each head, a 1x1 conv predicts a spatial attention mask in [1, 2);
    the masked feature map is global-average-pooled and classified by a
    linear layer.  Per-head softmax outputs are concatenated along dim 1.
    """

    def __init__(self, in_features, out_features_list, bias=True):
        super(cat_conv_linear_sp, self).__init__()
        self.in_features = in_features
        self.out_features_list = out_features_list
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
        for i, out_features in enumerate(out_features_list):
            self.add_module('mask_conv%d' % i,
                            nn.Conv2d(in_features, 1, kernel_size=1, stride=1, padding=0, bias=bias))
            self.add_module('fc%d' % i, nn.Linear(in_features, out_features, bias))

    def forward(self, x):
        """x: (bs, in_features, H, W) -> (bs, sum(out_features_list)) of probabilities."""
        x_cat = []
        for i, _ in enumerate(self.out_features_list):
            # 1 + sigmoid keeps every location contributing (mask in [1, 2));
            # torch.sigmoid replaces the deprecated F.sigmoid.
            mask = 1 + torch.sigmoid(getattr(self, 'mask_conv%d' % i)(x))
            x_ = self.avgpool(x * mask).view(x.size(0), -1)
            # Explicit dim=1: implicit-dim F.softmax is deprecated.
            x_cat.append(F.softmax(getattr(self, 'fc%d' % i)(x_), dim=1))
        x_cat = torch.cat(x_cat, 1)

        return x_cat

class cat_conv_linear_sp_merge(nn.Module):
    """Spatially-masked multi-head classifier over two feature maps.

    Each head learns a separate 1x1-conv attention mask (values in [1, 2))
    for each of the two inputs, average-pools the masked maps, concatenates
    the pooled vectors, and classifies them with a linear layer.  Per-head
    softmax outputs are concatenated along dim 1.
    """

    def __init__(self, in_features, out_features_list, bias=True):
        super(cat_conv_linear_sp_merge, self).__init__()
        self.in_features1 = in_features[0]
        self.in_features2 = in_features[1]
        self.out_features_list = out_features_list
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
        for i, out_features in enumerate(out_features_list):
            self.add_module('mask1_conv%d' % i,
                            nn.Conv2d(in_features[0], 1, kernel_size=1, stride=1, padding=0, bias=bias))
            self.add_module('mask2_conv%d' % i,
                            nn.Conv2d(in_features[1], 1, kernel_size=1, stride=1, padding=0, bias=bias))
            self.add_module('fc%d' % i, nn.Linear(in_features[0] + in_features[1], out_features, bias))

    def forward(self, l7, l8):
        """l7: (bs, in_features[0], H, W), l8: (bs, in_features[1], H, W)."""
        x_cat = []
        for i, _ in enumerate(self.out_features_list):
            # torch.sigmoid replaces the deprecated F.sigmoid.
            mask_l7 = 1 + torch.sigmoid(getattr(self, 'mask1_conv%d' % i)(l7))
            mask_l8 = 1 + torch.sigmoid(getattr(self, 'mask2_conv%d' % i)(l8))
            l7_ = self.avgpool(l7 * mask_l7)
            l8_ = self.avgpool(l8 * mask_l8)
            x = torch.cat([l7_, l8_], dim=1).view(l7.size(0), -1)
            # Explicit dim=1: implicit-dim F.softmax is deprecated.
            x_cat.append(F.softmax(getattr(self, 'fc%d' % i)(x), dim=1))
        x_cat = torch.cat(x_cat, 1)

        return x_cat



class cat_conv_linear_sp2(nn.Module):
    """Spatially-masked multi-head classifier with an external mask source.

    Like cat_conv_linear_sp, but the per-head attention mask is predicted
    from a separate (earlier-layer) feature map ``x_pre`` instead of from
    ``x`` itself.  Requires x_pre and x to share spatial extent so the mask
    can broadcast over x's channels.
    """

    def __init__(self, pre_channels, in_features, out_features_list, bias=True):
        super(cat_conv_linear_sp2, self).__init__()
        self.in_features = in_features
        self.out_features_list = out_features_list
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)
        for i, out_features in enumerate(out_features_list):
            self.add_module('mask_conv%d' % i,
                            nn.Conv2d(pre_channels, 1, kernel_size=1, stride=1, padding=0, bias=bias))
            self.add_module('fc%d' % i, nn.Linear(in_features, out_features, bias))

    def forward(self, x_pre, x):
        """x_pre: (bs, pre_channels, H, W); x: (bs, in_features, H, W)."""
        x_cat = []
        for i, _ in enumerate(self.out_features_list):
            # Mask in [1, 2); torch.sigmoid replaces the deprecated F.sigmoid.
            mask = 1 + torch.sigmoid(getattr(self, 'mask_conv%d' % i)(x_pre))
            x_ = self.avgpool(x * mask).view(x.size(0), -1)
            # Explicit dim=1: implicit-dim F.softmax is deprecated.
            x_cat.append(F.softmax(getattr(self, 'fc%d' % i)(x_), dim=1))
        x_cat = torch.cat(x_cat, 1)

        return x_cat

class NoiseAdapt(nn.Module):
    """Noise-adaptation channel for learning with noisy labels.

    Holds a learnable (classes x classes) weight matrix initialized from an
    empirical confusion matrix; forward maps clean-label probabilities to
    noisy-label probabilities via proba @ softmax(weights, dim=1).
    """

    def __init__(self, classes, confusion_matrix):
        super(NoiseAdapt, self).__init__()
        self.weights = nn.Parameter(torch.Tensor(classes, classes))

        # Row-normalize counts into P(noisy_label | prediction), then move to
        # log-space (with epsilon against log(0)) so that a row-wise softmax
        # of the weights reproduces those probabilities at initialization.
        channel_weights = confusion_matrix.copy().astype(np.float32)
        channel_weights /= channel_weights.sum(axis=1, keepdims=True)
        channel_weights = np.log(channel_weights + 1e-8)
        self.weights.data = torch.from_numpy(channel_weights).float()

    def forward(self, proba_in):
        """proba_in: (bs, classes) probabilities -> (bs, classes) noisy-label probabilities."""
        # dim=1 made explicit: each row must be a distribution over noisy
        # labels (the deprecated implicit-dim form resolved to dim=1 for 2-D).
        self.channel_mat = F.softmax(self.weights, dim=1)
        proba = torch.matmul(proba_in, self.channel_mat)
        return proba


class NoiseAdaptCat(nn.Module):
    """Multi-head classifier with per-head noise adaptation.

    Each head average-pools the feature map, classifies with a linear layer
    plus softmax, and — during training only — passes the probabilities
    through a NoiseAdapt channel initialized from that head's confusion
    matrix.  Head outputs are concatenated along dim 1.
    """

    def __init__(self, in_features, out_features_list, confusion_matrixs, bias=True):
        super(NoiseAdaptCat, self).__init__()
        self.in_features = in_features
        self.out_features_list = out_features_list
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=1)

        for i, out_features in enumerate(out_features_list):
            self.add_module('fc%d' % i, nn.Linear(in_features, out_features, bias))
            self.add_module('NoAdpt%d' % i, NoiseAdapt(out_features, confusion_matrixs[i]))

    def forward(self, x):
        """x: (bs, in_features, H, W) -> (bs, sum(out_features_list)) of probabilities."""
        x_cat = []
        for i, _ in enumerate(self.out_features_list):
            x_ = self.avgpool(x).view(x.size(0), -1)
            # Explicit dim=1: implicit-dim F.softmax is deprecated.
            x_ = F.softmax(getattr(self, 'fc%d' % i)(x_), dim=1)
            # Noise channel is only applied while training; at eval time the
            # clean-label probabilities are returned directly.
            if self.training:
                x_ = getattr(self, 'NoAdpt%d' % i)(x_)
            x_cat.append(x_)
        x_cat = torch.cat(x_cat, 1)
        return x_cat


class BasicConv2d(nn.Module):
    """Conv2d (bias-free) -> BatchNorm2d -> ReLU building block.

    Extra Conv2d keyword arguments (kernel_size, padding, ...) are passed
    through via **kwargs.
    """

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        # NOTE(review): momentum=0 keeps the running BN statistics frozen at
        # their initial values — preserved as-is; confirm it is intentional.
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0, affine=True)
        self.relu = nn.ReLU(inplace=False)

    def forward(self, x):
        return self.relu(self.bn(self.conv(x)))

class IncepText(nn.Module):
    """Inception-style block with asymmetric (1xk / kx1) convolutions.

    Three convolutional branches are concatenated, projected by a 1x1 conv,
    added to a 1x1-conv shortcut of the input, and passed through ReLU.
    """

    def __init__(self, in_channels, out_channels):
        super(IncepText, self).__init__()
        # Branch 1: 1x1 -> 1x1 -> 3x3
        self.b1_conv1 = BasicConv2d(in_channels, 256, kernel_size=1)
        self.b1_conv2 = BasicConv2d(256, 256, kernel_size=1)
        self.b1_conv3 = BasicConv2d(256, 256, kernel_size=3, padding=1)

        # Branch 2: 1x1 -> 1x3 -> 3x1 -> 3x3
        self.b2_conv1 = BasicConv2d(in_channels, 256, kernel_size=1)
        self.b2_conv2 = BasicConv2d(256, 256, kernel_size=(1, 3), padding=(0, 1))
        self.b2_conv3 = BasicConv2d(256, 256, kernel_size=(3, 1), padding=(1, 0))
        self.b2_conv4 = BasicConv2d(256, 256, kernel_size=(3, 3), padding=(1, 1))

        # Branch 3: 1x1 -> 1x5 -> 5x1 -> 3x3
        self.b3_conv1 = BasicConv2d(in_channels, 256, kernel_size=1)
        self.b3_conv2 = BasicConv2d(256, 256, kernel_size=(1, 5), padding=(0, 2))
        self.b3_conv3 = BasicConv2d(256, 256, kernel_size=(5, 1), padding=(2, 0))
        self.b3_conv4 = BasicConv2d(256, 256, kernel_size=(3, 3), padding=(1, 1))

        # Shortcut: 1x1 projection of the input to out_channels.
        self.b4_conv1 = BasicConv2d(in_channels, out_channels, kernel_size=1)

        self.catconv = BasicConv2d(256 * 3, out_channels, kernel_size=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        branch1 = self.b1_conv3(self.b1_conv2(self.b1_conv1(x)))
        branch2 = self.b2_conv4(self.b2_conv3(self.b2_conv2(self.b2_conv1(x))))
        branch3 = self.b3_conv4(self.b3_conv3(self.b3_conv2(self.b3_conv1(x))))
        shortcut = self.b4_conv1(x)
        merged = self.catconv(torch.cat([branch1, branch2, branch3], dim=1))
        return self.relu(merged + shortcut)


class FcMap(nn.Module):
    """Linear classifier whose weights are shared between two views:
    pooled logits and a per-location response map.
    """

    def __init__(self, in_features, out_features, bias=True):
        super(FcMap, self).__init__()
        self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Uniform init scaled by fan-in, matching nn.Linear's classic scheme.
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input):
        '''
        :param input: feature map of size (bs, C, H, W)
        :return: logits of  size (bs, out_features),
                sum feature map of size (bs, out_features, H, W)
        '''
        # Apply the shared linear map at every spatial location: move the
        # channel axis last, project, then move it back.
        per_location = F.linear(input.transpose(1, 3), self.weight, self.bias)
        per_location = per_location.transpose(1, 3)

        # Global-average-pool and project with the very same weights.
        pooled = F.adaptive_avg_pool2d(input, output_size=1).view(input.size(0), -1)
        return F.linear(pooled, self.weight, self.bias), per_location


class cat_FcMap(nn.Module):
    """Multi-head FcMap classifier.

    One FcMap head per entry of ``out_features_list``; the softmaxed logits
    and the per-location feature maps of all heads are concatenated along
    dim 1.
    """

    def __init__(self, in_features, out_features_list, bias=True):
        super(cat_FcMap, self).__init__()
        self.in_features = in_features
        self.out_features_list = out_features_list
        for i, out_features in enumerate(out_features_list):
            self.add_module('FcAmp%d' % i, FcMap(in_features, out_features, bias))

    def forward(self, x):
        '''
        :param x:  feature map of size (bs, C, H, W)
        :return:   logits_cat of size (bs, c1+c2+... )
                    feamap_cat of size (bs, c1+c2+..., H, W)
        '''
        logps_cat = []
        fmaps_cat = []
        for i, _ in enumerate(self.out_features_list):
            logits, fmaps = getattr(self, 'FcAmp%d' % i)(x)
            # Explicit dim=1: implicit-dim F.softmax is deprecated (it
            # resolved to dim=1 for 2-D logits, so behavior is unchanged).
            logps = F.softmax(logits, dim=1)
            fmaps_cat.append(fmaps)
            logps_cat.append(logps)

        logps_cat = torch.cat(logps_cat, 1)
        fmaps_cat = torch.cat(fmaps_cat, 1)

        return logps_cat, fmaps_cat

def bilinear(x1, x2):
    """Bilinear pooling of two feature maps.

    Computes the spatially-averaged outer product of channels of x1 and x2,
    applies signed square-root, and L2-normalizes the flattened result.

    :param x1: tensor of size (bs, C1, H, W)
    :param x2: tensor of size (bs, C2, H, W), same bs, H, W as x1
    :return: tensor of size (bs, C1*C2)
    """
    bs1, C1, H1, W1 = x1.size()
    bs2, C2, H2, W2 = x2.size()
    assert bs1 == bs2 and H1 == H2 and W1 == W2, 'bs or H,W of x1 and x2 not equal'

    flat1 = x1.view(bs1, C1, H1 * W1)
    flat2 = x2.view(bs2, C2, H2 * W2)
    # Channel-pair correlations averaged over spatial positions.
    outer = torch.matmul(flat1, flat2.permute(0, 2, 1)).view(-1, C1 * C2) / (H1 * W1)
    # Signed sqrt (epsilon guards the gradient at zero), then L2 normalize.
    signed_sqrt = torch.sign(outer) * torch.sqrt(torch.abs(outer) + 1e-12)
    return F.normalize(signed_sqrt, p=2, dim=1)


if __name__ == '__main__':
    # Smoke test for bilinear(): two small feature maps sharing batch size
    # and spatial extent.  Tensors are created as float32 because bilinear()
    # takes a square root and L2-normalizes, which require floating point.
    x1 = torch.arange(0, 2 * 3 * 5 * 6, dtype=torch.float32).view(2, 3, 5, 6)
    x2 = -torch.arange(0, 2 * 7 * 5 * 6, dtype=torch.float32).view(2, 7, 5, 6)

    # bilinear is a plain function, not a class: call it directly.
    # (The old code did `bl = bilinear()` which raised a TypeError, and used
    # Python-2 print statements.)
    x = bilinear(x1, x2)
    print(x)
    print(x.size())

