import math
from collections import OrderedDict

import torch
import torch.nn as nn


# 一些见名不知意的变量（参数）
# inplanes：输入通道数
# planes：输出通道数
# 调用顺序：DarkNet -> DarkNet._make_layer -> BasicBlock

#---------------------------------------------------------------------#
#   残差结构
#   一个1x1卷积（conv1*1 padding=0）下降通道数，然后用一个3x3卷积（padding=1）提取特征并且上升通道数
#   最后接上一个残差边
# 
#   注意！！！
#   1. 3*3的通道数是1*1的两倍，所以planes是一个数组（分别存储卷积核的个数）
#   2. 经过下采样后，才开始残差堆叠。因为残差结构不改变输入tensor的shape，所以下采样的tensor才可以和残差后的tensor进行相加
#---------------------------------------------------------------------#
class BasicBlock(nn.Module):
    """Darknet residual block.

    A 1x1 convolution shrinks the channel count, a 3x3 convolution extracts
    features and expands it back, and the input is added on as a shortcut.
    Neither conv changes the spatial size, and the 3x3 conv restores the
    channel count, so the skip connection needs no projection.

    Args:
        inplanes: number of input channels.
        planes: pair [c1, c2] — output channels of the 1x1 and 3x3 convs;
            c2 must equal inplanes for the residual add to be valid.
    """

    def __init__(self, inplanes, planes):
        super(BasicBlock, self).__init__()
        # 1x1 bottleneck: reduce channels before the expensive 3x3 conv.
        self.conv1 = nn.Conv2d(inplanes, planes[0], kernel_size=1,
                               stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes[0])
        self.relu1 = nn.LeakyReLU(0.1)

        # 3x3 conv: extract features and expand channels back to planes[1].
        self.conv2 = nn.Conv2d(planes[0], planes[1], kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes[1])
        self.relu2 = nn.LeakyReLU(0.1)

    def forward(self, x):
        # Keep the input as the shortcut branch.
        shortcut = x

        # Main branch: conv -> BN -> LeakyReLU, twice.
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))

        # Residual add; shapes match because the block is shape-preserving.
        return out + shortcut


#                               这两个加起来才是代码中的_make_layer
# 主干网络。可以看成：convolutional + (convolutional + residual) * n
#                      卷积操作     卷积核代替下采样  残差结构不改变shape！！！！
class DarkNet(nn.Module):
    """Darknet-53 backbone.

    A stem convolution followed by five stages; each stage is one stride-2
    3x3 downsampling convolution plus a stack of residual BasicBlocks.
    The forward pass returns the outputs of the last three stages, which
    downstream detection heads predict on.

    Args:
        layers: number of residual blocks per stage (five entries),
            e.g. [1, 2, 8, 8, 4] for darknet53.
    """

    def __init__(self, layers):
        super(DarkNet, self).__init__()
        self.inplanes = 32

        # Stem: 3x3 conv + BN + LeakyReLU; spatial size is unchanged
        # (416,416,3 -> 416,416,32 for a 416x416 input).
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu1 = nn.LeakyReLU(0.1)

        # Five stages; each halves H/W and doubles the channels:
        # 416 -> 208 -> 104 -> 52 -> 26 -> 13 for a 416x416 input.
        self.layer1 = self._make_layer([32, 64], layers[0])
        self.layer2 = self._make_layer([64, 128], layers[1])
        self.layer3 = self._make_layer([128, 256], layers[2])
        self.layer4 = self._make_layer([256, 512], layers[3])
        self.layer5 = self._make_layer([512, 1024], layers[4])

        # Output channel count of each stage, for downstream heads.
        self.layers_out_filters = [64, 128, 256, 512, 1024]

        # He-style init for conv weights; identity init for batch norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = (module.kernel_size[0] * module.kernel_size[1]
                           * module.out_channels)
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def _make_layer(self, planes, blocks):
        """Build one stage: a stride-2 downsampling conv + `blocks` residual blocks.

        Args:
            planes: [c1, c2] channel pair passed to each BasicBlock; the
                downsampling conv outputs c2 channels.
            blocks: how many BasicBlocks to stack after the downsample.
        """
        stage = OrderedDict()
        # Downsample: stride-2 3x3 conv halves the spatial size.
        stage["ds_conv"] = nn.Conv2d(self.inplanes, planes[1], kernel_size=3,
                                     stride=2, padding=1, bias=False)
        stage["ds_bn"] = nn.BatchNorm2d(planes[1])
        stage["ds_relu"] = nn.LeakyReLU(0.1)
        # Residual blocks are shape-preserving, so they can be stacked
        # directly on the downsampled tensor.
        self.inplanes = planes[1]
        for i in range(blocks):
            stage["residual_{}".format(i)] = BasicBlock(self.inplanes, planes)
        return nn.Sequential(stage)

    def forward(self, x):
        # Stem.
        x = self.relu1(self.bn1(self.conv1(x)))

        # First two stages feed the rest of the backbone only.
        x = self.layer2(self.layer1(x))

        # The last three stage outputs are returned for multi-scale prediction.
        out3 = self.layer3(x)
        out4 = self.layer4(out3)
        out5 = self.layer5(out4)
        return out3, out4, out5


def darknet53(pretrained, **kwargs):
    """Build a Darknet-53 backbone.

    Args:
        pretrained: falsy for random initialization, or a filesystem path
            (str) to a saved state dict to load into the model.
        **kwargs: unused; kept for signature compatibility with callers.

    Returns:
        A DarkNet instance with [1, 2, 8, 8, 4] residual blocks per stage.

    Raises:
        ValueError: if `pretrained` is truthy but not a path string
            (e.g. `pretrained=True` gives nothing to load from).
    """
    model = DarkNet([1, 2, 8, 8, 4])
    if pretrained:
        if not isinstance(pretrained, str):
            # ValueError subclasses Exception, so existing handlers still catch it.
            raise ValueError(
                "darknet53 requires a pretrained weights path, got [{}]".format(pretrained))
        # map_location keeps loading working on CPU-only machines even when
        # the checkpoint was saved from a GPU.
        model.load_state_dict(torch.load(pretrained, map_location="cpu"))
    return model