import torch
import torch.nn as nn
from collections import OrderedDict

class BasicBlock(nn.Module):
    """Two 3x3-conv residual block (ResNet-18/34 style).

    The first conv uses the given ``stride`` (typically 2 when the block
    opens a new stage) and the second always uses stride 1, so the spatial
    size shrinks by exactly 1/stride across the whole block.
    """

    # Output channels == planes * expansion; the basic block does not widen.
    expansion = 1

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        """
        Args:
            in_planes: number of input channels.
            planes: number of output channels of both convolutions.
            stride: stride of the first conv (spatial downsampling factor).
            downsample: optional module applied to the identity branch so its
                shape matches the residual branch (e.g. a strided 1x1 conv).
        """
        super(BasicBlock, self).__init__()

        # bias=False: each conv is immediately followed by BatchNorm, which
        # cancels any additive bias, so the bias parameters are redundant
        # (this also matches the bias=False convention used in BottleNeck).
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)

        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn-relu, conv-bn, add identity, final relu."""
        residual = x

        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))

        # Compare against None explicitly: truthiness of an nn.Module can
        # misfire (e.g. an empty nn.Sequential is falsy via __len__).
        if self.downsample is not None:
            residual = self.downsample(residual)

        out += residual
        return self.relu2(out)


class BottleNeck(nn.Module):
    """1x1 / 3x3 / 1x1 bottleneck residual block (ResNet-50+ style).

    Only the middle 3x3 conv carries the given ``stride``, so the spatial
    size shrinks by exactly 1/stride across the whole block; the final 1x1
    conv widens the channels by ``expansion``.
    """

    # Output channels == planes * expansion.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1, downsample=None):
        """
        Args:
            in_planes: number of input channels.
            planes: bottleneck width; the block outputs planes * expansion.
            stride: stride of the middle 3x3 conv (downsampling factor).
            downsample: optional module applied to the identity branch so its
                shape matches the residual branch (e.g. a strided 1x1 conv).
        """
        super(BottleNeck, self).__init__()

        # bias=False throughout: every conv feeds a BatchNorm, which absorbs
        # any additive bias, so the bias parameters would be dead weight.
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)

        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)

        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1,
                               bias=False)
        self.bn3 = nn.BatchNorm2d(self.expansion * planes)
        self.relu3 = nn.ReLU(inplace=True)

        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv1x1-bn-relu, conv3x3-bn-relu, conv1x1-bn, add identity, relu."""
        residual = x

        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # Explicit None check (truthiness of nn.Module is unreliable); the
        # debug print statements that were here have been removed.
        if self.downsample is not None:
            residual = self.downsample(residual)

        out += residual
        return self.relu3(out)


class ResNet(nn.Module):
    """ResNet backbone assembled from BasicBlock or BottleNeck blocks.

    Stem (7x7 conv, stride 2 + max pool) -> four stages of residual blocks
    -> global average pool -> linear classifier.
    """

    def __init__(self, block, layers, num_class=1000):
        """
        Args:
            block: the residual block class (BasicBlock or BottleNeck); must
                expose an ``expansion`` class attribute.
            layers: number of blocks in each of the four stages, e.g.
                [3, 4, 6, 3] for ResNet-50.
            num_class: number of output classes of the final linear layer.
        """
        super(ResNet, self).__init__()

        self.in_planes = 64

        # Stem. bias=False on conv0 because BatchNorm follows immediately.
        self.features = nn.Sequential(
            OrderedDict([('conv0', nn.Conv2d(3, 64, kernel_size=7, padding=3,
                                             stride=2, bias=False)), ])
        )
        self.features.add_module('bn1', nn.BatchNorm2d(64))
        self.features.add_module('relu1', nn.ReLU(inplace=True))
        self.features.add_module('max_pooling', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))

        # Four residual stages; stages 2-4 halve the spatial size.
        self.features.add_module('block1', self._make_layer(block, 64, layers[0], stride=1))
        self.features.add_module('block2', self._make_layer(block, 128, layers[1], stride=2))
        self.features.add_module('block3', self._make_layer(block, 256, layers[2], stride=2))
        self.features.add_module('block4', self._make_layer(block, 512, layers[3], stride=2))

        # Adaptive pooling reduces any spatial size to 1x1 — equivalent to
        # AvgPool2d(7) for the canonical 224x224 input, but also works for
        # other input resolutions.
        self.features.add_module('avg_pooling', nn.AdaptiveAvgPool2d(1))

        # Bug fix: this was hard-coded to 2048 (= 512 * BottleNeck.expansion),
        # which breaks with BasicBlock (expansion=1). Derive it from the block.
        self.classifier = nn.Linear(512 * block.expansion, num_class)

    def _make_layer(self, block, planes, num_blocks, stride=1):
        """Build one stage of ``num_blocks`` residual blocks.

        Channel expansion and spatial downsampling happen only in the first
        block of the stage: it receives the given ``stride`` and, when the
        identity branch's shape would not match (stride != 1 or channel
        change), a 1x1-conv + BatchNorm shortcut. The remaining blocks keep
        stride 1 and in_planes == planes * block.expansion.
        """
        downsample = None
        if stride != 1 or self.in_planes != planes * block.expansion:
            # Standard ResNet projection shortcut: the 1x1 conv is followed
            # by BatchNorm so both branches of the residual sum are
            # normalized consistently.
            downsample = nn.Sequential(
                nn.Conv2d(self.in_planes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = [block(self.in_planes, planes, stride=stride, downsample=downsample)]
        self.in_planes = planes * block.expansion
        for _ in range(1, num_blocks):
            layers.append(block(self.in_planes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        """Return class logits of shape (batch, num_class)."""
        out = self.features(x)
        out = out.view(out.size(0), -1)  # flatten the 1x1 pooled features
        return self.classifier(out)


if __name__ == "__main__":
    # Smoke test: build a BottleNeck-based ResNet and run one forward pass.
    # Guarded so importing this module does not trigger model construction.
    model = ResNet(BottleNeck, [3, 3, 3, 3], num_class=4)

    x = torch.randn(1, 3, 224, 224)
    y = model(x)