import torch
import torch.nn as nn
import numpy as np
from model.norm_dist import NormDist

# Base hidden-layer width; every model below uses `net_width * width` neurons per hidden layer.
net_width = 1024

class MLP(nn.Module):
    '''
    A multi-layer l-infinity network.

    The input dimension is given by `input_dim` (an ndarray; the effective
    input size is the product of its entries), the output dimension is
    `num_classes`, and `depth` counts all layers including input and output.
    Each hidden layer has 1024 (net_width) * width neurons.
    '''
    def __init__(self, depth, width, input_dim, num_classes=10):
        super(MLP, self).__init__()
        in_features = np.prod(input_dim)
        hidden_features = net_width * width
        hidden_layers = []
        for _ in range(depth - 1):
            hidden_layers.append(
                NormDist(in_features, hidden_features, bias=False, mean_normalize=True))
            in_features = hidden_features
        self.fc = nn.ModuleList(hidden_layers)
        self.fc_last = NormDist(in_features, num_classes, bias=True, mean_normalize=False)

    def forward(self, x, lower=None, upper=None):
        # Flatten each of (input, lower bound, upper bound); bounds may be None.
        paras = [t if t is None else t.view(t.size(0), -1) for t in (x, lower, upper)]
        for dist_layer in self.fc:
            paras = dist_layer(*paras)
        res = self.fc_last(*paras)
        # Negate the outputs; negation swaps the roles of the two bounds,
        # so the old upper bound becomes the new lower bound and vice versa.
        return [t if t is None else -t for t in (res[0], res[2], res[1])]

class Denoising(nn.Module):
    '''
    A denoising module: a self-attention-style feature mixing step followed
    by a residual 1x1-convolution connection.
    '''
    def __init__(self, channel, feature_dim):
        super(Denoising, self).__init__()
        # channel: number of feature channels C; feature_dim: flattened spatial size H*W.
        self.channel = channel
        self.feature_dim = feature_dim
        self.conv1x1 = nn.Conv2d(channel, channel, kernel_size=(1, 1))

    def forward(self, x):
        # Bound tensors may be absent; pass None straight through.
        if x is None:
            return None
        flat = x.view(-1, self.channel, self.feature_dim)
        # Pairwise spatial-feature similarities, normalized along dim 1 by softmax.
        attn = nn.functional.softmax(flat.transpose(1, 2) @ flat, dim=1)
        mixed = flat @ attn
        # Residual connection around the 1x1 conv.
        out = self.conv1x1(mixed.view_as(x)).view_as(flat) + flat
        return out.view_as(x)

class DenoisingAdvance(nn.Module):
    '''
    Non-local denoising block with a residual connection.

    Args:
        channel: number of input (and output) channels.
        feature_dim: unused; kept for interface compatibility with callers.
        embed: if True, project the input through 1x1 convs before computing
            affinities (a kernel-method-like nonlinear similarity); otherwise
            use the raw features as theta/phi/g.
        softmax: if True, normalize affinities with a scaled softmax
            (dot-product-attention style); otherwise use a plain dot-product
            affinity normalized by the number of spatial locations.
    '''
    def __init__(self, channel, feature_dim, embed = True, softmax = True):
        super(DenoisingAdvance, self).__init__()
        self.embed = embed
        self.softmax = softmax
        self.embedding_theta = nn.Conv2d(channel, channel // 2, kernel_size = 1, stride = 1)
        self.embedding_phi = nn.Conv2d(channel, channel // 2, kernel_size = 1, stride = 1)
        self.conv = nn.Conv2d(channel, channel, kernel_size = 1, stride = 1)
        self.bn = nn.BatchNorm2d(channel)

    def forward(self, l):
        short_cut = l

        n_in, H, W = l.shape[1:]
        if self.embed:
            # Embedded version: similarity computed in a projected feature space.
            theta = self.embedding_theta(l)
            phi = self.embedding_phi(l)
            g = l
        else:
            theta, phi, g = l, l, l

        # einsum expresses the pairwise contraction patterns below compactly.
        if n_in > H * W or self.softmax:
            # Affinity between every pair of spatial locations: (n, H, W, H, W).
            f = torch.einsum('niab,nicd->nabcd', theta, phi)
            if self.softmax:
                orig_shape = f.shape
                f = torch.reshape(f, [-1, H * W, H * W])
                # Scale by sqrt(d) before the softmax, as in dot-product attention.
                f = f / torch.sqrt(torch.tensor(theta.shape[1], dtype = theta.dtype))
                f = nn.functional.softmax(f, dim = -1)
                f = torch.reshape(f, orig_shape)
            # Aggregate g at every location, weighted by the affinities.
            f = torch.einsum('nabcd,nicd->niab', f, g)
        else:
            # Cheaper channel-space contraction when channels <= spatial size:
            # dot products as similarities, then a weighted sum over theta.
            f = torch.einsum('nihw,njhw->nij', phi, g)
            f = torch.einsum('nij,nihw->njhw', f, theta)
        if not self.softmax:
            # BUG FIX: the original used `torch.cast(H * W, f.dtype)`, but
            # `torch.cast` does not exist in PyTorch (it is TensorFlow API),
            # so the softmax=False path raised AttributeError. Normalize the
            # dot-product affinity by the number of spatial locations instead.
            f = f / float(H * W)
        l = torch.reshape(f, l.shape)

        l = self.bn(self.conv(l))
        # Residual connection around the denoising transform.
        return l + short_cut

class Denoising_MLP(nn.Module):
    '''
    layer 1: conv2d
    layer 2: denoising
    layer 3: conv2d
    layer 4 ~ layer 3+depth: lipnet
    '''
    def __init__(self, depth, width, input_dim, num_classes=10):
        super(Denoising_MLP, self).__init__()
        self.input_channel = input_dim[0]
        # Flattened spatial size (H*W) of the input images.
        spatial = np.prod(input_dim[1:])
        self.conv1 = nn.Conv2d(self.input_channel, 64, kernel_size=(3, 3), padding=1)
        self.denoising = Denoising(64, spatial)
        self.conv2 = nn.Conv2d(64, 10, kernel_size=(3, 3), padding=1)
        # conv2 emits 10 channels, so the flattened feature size is spatial * 10.
        in_features = spatial * 10
        hidden_features = net_width * width
        dist_layers = []
        for _ in range(depth - 1):
            dist_layers.append(
                NormDist(in_features, hidden_features, bias=False, mean_normalize=True))
            in_features = hidden_features
        self.fc = nn.ModuleList(dist_layers)
        self.fc_last = NormDist(in_features, num_classes, bias=True, mean_normalize=False)

    def forward(self, x, lower=None, upper=None):
        def backbone(t):
            # conv1 -> denoising -> conv2; bound tensors may be None.
            return None if t is None else self.conv2(self.denoising(self.conv1(t)))
        paras = [None if t is None else t.view(t.size(0), -1)
                 for t in (backbone(x), backbone(lower), backbone(upper))]
        for dist_layer in self.fc:
            paras = dist_layer(*paras)
        res = self.fc_last(*paras)
        # Negate the outputs; negation swaps the two bounds, so the old upper
        # bound becomes the new lower bound and vice versa.
        return [None if t is None else -t for t in (res[0], res[2], res[1])]

class MLPFeature(nn.Module):
    '''
    A multi-layer l-infinity network.

    Like MLP, except the final output dimension is 1024 (net_width) * width
    rather than num_classes: the network emits a feature vector meant to be
    fed into a subsequent fully connected layer.
    '''
    def __init__(self, depth, width, input_dim):
        super(MLPFeature, self).__init__()
        in_features = np.prod(input_dim)
        hidden_features = net_width * width
        dist_layers = []
        for _ in range(depth):
            dist_layers.append(
                NormDist(in_features, hidden_features, bias=False, mean_normalize=True))
            in_features = hidden_features
        # Feature dimension produced by forward(), for downstream heads.
        self.out_features = in_features
        self.fc = nn.ModuleList(dist_layers)

    def forward(self, x, lower=None, upper=None):
        # Flatten each of (input, lower bound, upper bound); bounds may be None.
        paras = [t if t is None else t.view(t.size(0), -1) for t in (x, lower, upper)]
        for dist_layer in self.fc:
            paras = dist_layer(*paras)
        return paras
