
from numpy.lib import stride_tricks
import torch
import torch.nn as nn
import numpy as np

'''
ResNet + feature-denoising implementation.
'''


'''
Denoising module. The `embed` and `softmax` flags choose whether to use a
non-linear (embedded) inner product for the affinities; both default to True.
'''
class Denoising_Module(nn.Module):
    """Non-local feature-denoising block (residual non-local mean).

    Computes an affinity-weighted average over spatial positions, projects the
    result with a 1x1 conv + BatchNorm, and adds it back to the input.

    Args:
        n_in: number of input channels.
        embed: if True, compute affinities on 1x1-conv embeddings
            (theta/phi) instead of the raw features.
        softmax: if True, normalize affinities with a scaled softmax
            (attention-style); otherwise average by the number of positions.
    """

    def __init__(self, n_in, embed = True, softmax = True):
        super(Denoising_Module, self).__init__()
        self.embed = embed
        self.softmax = softmax
        # Embedding convs are created unconditionally so the module's
        # parameter set does not depend on the `embed` flag.
        self.embedding_theta = nn.Conv2d(n_in, n_in // 2, kernel_size = 1, stride = 1)
        self.embedding_phi = nn.Conv2d(n_in, n_in // 2, kernel_size = 1, stride = 1)
        self.conv = nn.Conv2d(n_in, n_in, kernel_size = 1, stride = 1)
        self.bn = nn.BatchNorm2d(n_in)

    def forward(self, l):
        """Denoise `l` of shape (N, n_in, H, W); returns the same shape."""
        short_cut = l

        n_in, H, W = list(l.shape)[1:]
        if self.embed:
            # Kernel-method-like non-linear embedding for the affinity.
            theta = self.embedding_theta(l)
            phi = self.embedding_phi(l)
            g = l
        else:
            theta, phi, g = l, l, l

        # einsum expresses the pairwise dot products compactly.
        if n_in > H * W or self.softmax:
            # f[n,a,b,c,d] = <theta[n,:,a,b], phi[n,:,c,d]>: spatial affinities.
            f = torch.einsum('niab,nicd->nabcd', theta, phi)
            if self.softmax:
                orig_shape = f.shape
                f = torch.reshape(f, [-1, H * W, H * W])
                # Scale by sqrt(embedding dim) before softmax, as in attention.
                # (Scalar math avoids creating a tensor on the wrong device.)
                f = f / (theta.shape[1] ** 0.5)
                f = nn.functional.softmax(f, dim = -1)
                f = torch.reshape(f, orig_shape)
            f = torch.einsum('nabcd,nicd->niab', f, g)
        else:
            # Channel-affinity formulation: cheaper when n_in <= H * W.
            f = torch.einsum('nihw,njhw->nij', phi, g)   # dot-product similarities
            f = torch.einsum('nij,nihw->njhw', f, theta) # weighted sum by f
        if not self.softmax:
            # Plain averaging when softmax normalization is off.
            # BUG FIX: original called torch.cast(...), a TensorFlow API that
            # does not exist in PyTorch and raised AttributeError here.
            f = f / float(H * W)
        l = torch.reshape(f, l.shape)

        l = self.bn(self.conv(l))
        return l + short_cut


'''
Basic ResNet bottleneck block: 1x1, 3x3, 1x1 convolutions plus a shortcut.
'''
class ResNetBottleNeck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, with a
    (possibly projected) shortcut. Output channels = expansion * feature."""

    expansion = 4

    def __init__(self, input_feature, feature, stride = 1):
        super(ResNetBottleNeck, self).__init__()

        # 1x1 channel reduction.
        self.conv1 = nn.Conv2d(input_feature, feature, kernel_size = 1, bias = False)
        self.bn1 = nn.BatchNorm2d(feature)

        # 3x3 spatial conv; carries the block's stride.
        self.conv2 = nn.Conv2d(feature, feature, kernel_size = 3, stride = stride, padding = 1, bias=False)
        self.bn2 = nn.BatchNorm2d(feature)

        # 1x1 channel expansion.
        out_channels = self.expansion * feature
        self.conv3 = nn.Conv2d(feature, out_channels, kernel_size = 1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)

        # Identity shortcut unless the shape changes; then 1x1 projection + BN.
        if stride == 1 and input_feature == out_channels:
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(input_feature, out_channels, kernel_size = 1, stride = stride, bias = False),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, l):
        """Apply the bottleneck to `l` and return relu(main path + shortcut)."""
        relu = nn.functional.relu
        residual = self.shortcut(l)
        out = relu(self.bn1(self.conv1(l)))
        out = relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        return relu(out + residual)

'''
ResNet + denoising model.
depth selects the ResNet depth: 50, 101 or 152.
input_dim is the input image shape, (channel, H, W).
group_func is the class used to build each residual block.
num_classes is the number of output classes, default 10.


Overall input: (batch_size, channel, H, W), where channel is 3 or 1.
Overall output: (batch_size, num_classes) logits; argmax is not taken.
'''

class ResNetDenoiseModel(nn.Module):
    """ResNet backbone with a denoising block appended to each residual stage.

    Args:
        depth: ResNet depth, one of 50 / 101 / 152.
        input_dim: input image shape as (channel, H, W).
        group_func: residual block class (defaults to ResNetBottleNeck).
        num_classes: size of the output logits (default 10).

    Input is (batch, channel, H, W); output is ((batch, num_classes) logits,
    lower, upper) where lower/upper are passed through unchanged.
    """

    def __init__(self, depth, input_dim, group_func=ResNetBottleNeck, num_classes=10):
        super(ResNetDenoiseModel, self).__init__()
        # Per-stage residual-block counts for the supported depths.
        self.NUM_BLOCKS = {
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3]
        }

        self.input_features = 64
        self.num_blocks = self.NUM_BLOCKS[depth]
        # Stem: 1x1 conv (padding=1 grows H/W by 2) and a no-op pooling layer.
        # NOTE(review): kernel_size=1 for both looks input-size dependent per
        # the original author's comments — kept as-is.
        self.Conv2D = nn.Conv2d(in_channels = input_dim[0], out_channels = self.input_features, kernel_size = 1, stride = 1, padding = 1)
        self.max_pooling = nn.MaxPool2d(kernel_size = 1, stride = 1)
        # Four residual stages; all but the first downsample with stride 2.
        self.group_layer1 = self._make_layer(group_func, 64, self.num_blocks[0], 1)
        self.group_layer2 = self._make_layer(group_func, 128, self.num_blocks[1], 2)
        self.group_layer3 = self._make_layer(group_func, 256, self.num_blocks[2], 2)
        self.group_layer4 = self._make_layer(group_func, 512, self.num_blocks[3], 2)
        # Global average pooling is done inline in forward() via torch.mean.
        # NOTE(review): the reference code used a specific initializer for the
        # final linear layer; the PyTorch default is used here.
        self.fully_connected = nn.Linear(512 * group_func.expansion, num_classes)

    def _make_layer(self, group_func, features, count, stride = 1):
        """Stack `count` residual blocks (stride applied to the first only),
        append a denoising block, and advance self.input_features."""
        blocks = []
        for idx in range(count):
            blocks.append(group_func(self.input_features, features, stride if idx == 0 else 1))
            self.input_features = features * group_func.expansion
        blocks.append(Denoising_Module(self.input_features, embed=True, softmax=True))
        return nn.Sequential(*blocks)

    def forward(self, l, lower=None, upper=None):
        """Run the backbone; `lower`/`upper` are returned untouched."""
        out = self.max_pooling(self.Conv2D(l))
        for stage in (self.group_layer1, self.group_layer2,
                      self.group_layer3, self.group_layer4):
            out = stage(out)
        # Global average pool: (N, C, H, W) -> (N, C).
        out = torch.mean(out, dim=(2, 3))
        return self.fully_connected(out), lower, upper

'''
if __name__ == '__main__':
    image = np.zeros((2,3,100,100), dtype=np.float64) #build a ResNet-50
    image = torch.from_numpy(image)
    #print(image.shape[1:])
    layers = ResNetDenoiseModel(50, image.shape[1:], ResNetBottleNeck)
    layers = layers.double()

    output = layers(image)
    print(output)
'''