# -*- coding:utf-8 -*-
"""
# @file name    : module_containers.py
# @author       : QuZhang
# @date         : 2020-12-13 16:11
# @brief        : 模型容器：用于包装多个网络层
"""
import torch
import torch.nn as nn
from collections import OrderedDict
import torchvision


# Method 1: Sequential
# Ordered execution; typically used to wrap sub-network building blocks.
class LeNetSequential(nn.Module):
    """LeNet wrapped with plain ``nn.Sequential`` containers.

    Each stage is a single Sequential, so sub-layers are addressed
    only by their integer index (``features[0]``, ``classifier[2]``, ...).
    """

    def __init__(self, classes):
        super(LeNetSequential, self).__init__()
        # Stage 1: convolution/pooling feature extractor
        # (3x32x32 input -> 16 feature maps of 5x5).
        self.features = nn.Sequential(
            nn.Conv2d(3, 6, 5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # Stage 2: fully-connected classifier head.
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            nn.Linear(84, classes),
        )

    def forward(self, x):
        feat = self.features(x)
        # Flatten to 2-D: one row per image in the batch.
        flat = feat.view(feat.size(0), -1)
        return self.classifier(flat)


class LeNetSequentialOrderDict(nn.Module):
    """LeNet whose Sequential sub-modules carry human-readable names.

    Feeding an ``OrderedDict`` into ``nn.Sequential`` lets every layer
    be addressed by name (e.g. ``features.conv1``) instead of an index.
    """

    def __init__(self, classes):
        super().__init__()

        # Feature extractor: conv -> relu -> pool, twice.
        self.features = nn.Sequential(OrderedDict([
            ('conv1', nn.Conv2d(3, 6, 5)),
            ('relu1', nn.ReLU(inplace=True)),
            ('pool1', nn.MaxPool2d(kernel_size=2, stride=2)),
            ('conv2', nn.Conv2d(6, 16, 5)),
            ('relu2', nn.ReLU(inplace=True)),
            ('pool2', nn.MaxPool2d(kernel_size=2, stride=2)),
        ]))

        # Classifier head: three named fully-connected layers.
        self.classifier = nn.Sequential(OrderedDict([
            ('fc1', nn.Linear(16 * 5 * 5, 120)),
            ('relu3', nn.ReLU(inplace=True)),
            ('fc2', nn.Linear(120, 84)),
            ('relu4', nn.ReLU(inplace=True)),
            ('fc3', nn.Linear(84, classes)),
        ]))

    def forward(self, x):
        # Sequential supplies its own forward: children run in insertion order.
        out = self.features(x)
        out = out.view(out.size(0), -1)
        return self.classifier(out)


# Method 2: ModuleList
# Iterative construction; handy when many repeated sub-layers are
# built in a loop (a plain Python list would not register parameters).
class ModuleList(nn.Module):
    """A chain of 20 identical 10->10 linear layers held in an nn.ModuleList."""

    def __init__(self):
        super().__init__()
        # Build the 20 fully-connected layers from a generator expression.
        self.linears = nn.ModuleList(nn.Linear(10, 10) for _ in range(20))

    def forward(self, x):
        # Apply every layer in order, feeding each output to the next.
        for layer in self.linears:
            x = layer(x)
        return x


# Method 3: ModuleDict
# Keyed lookup; useful when sub-layers are selected/combined at call time.
class ModuleDict(nn.Module):
    """Named sub-modules that are chosen by string key in ``forward``."""

    def __init__(self):
        super().__init__()
        # Candidate feature layers, one of which is picked per call.
        self.choices = nn.ModuleDict({
            'conv': nn.Conv2d(10, 10, 3),
            'pool': nn.MaxPool2d(3),
        })

        # Candidate activation functions, also picked per call.
        self.activations = nn.ModuleDict({
            'relu': nn.ReLU(),
            'prelu': nn.PReLU(),
        })

    def forward(self, x, choices, act):
        # Resolve both modules by key, then run them back to back.
        layer = self.choices[choices]
        activation = self.activations[act]
        return activation(layer(x))


class MyAlexNet(nn.Module):
    """AlexNet rebuilt with named layers (Sequential + OrderedDict).

    Same layer configuration as ``torchvision.models.AlexNet`` but with
    a configurable number of output classes and explicit layer names.
    """

    def __init__(self, classes):
        super().__init__()
        # Convolutional backbone: five conv layers and three max-pools.
        self.features = nn.Sequential(OrderedDict([
            ("conv1", nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2)),
            ("relu1", nn.ReLU(inplace=True)),
            ("maxpool1", nn.MaxPool2d(kernel_size=3, stride=2)),
            ("conv2", nn.Conv2d(64, 192, kernel_size=5, padding=2)),
            ("relu2", nn.ReLU(inplace=True)),
            ("maxpool2", nn.MaxPool2d(kernel_size=3, stride=2)),
            ("conv3", nn.Conv2d(192, 384, kernel_size=3, padding=1)),
            ("relu3", nn.ReLU(inplace=True)),
            ("conv4", nn.Conv2d(384, 256, kernel_size=3, padding=1)),
            ("relu4", nn.ReLU(inplace=True)),
            ("conv5", nn.Conv2d(256, 256, kernel_size=3, padding=1)),
            ("relu5", nn.ReLU(inplace=True)),
            ("maxpool5", nn.MaxPool2d(kernel_size=3, stride=2)),
        ]))
        # Pool to a fixed 6x6 grid so varying input sizes still fit fc1.
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        # Fully-connected head with dropout regularization.
        self.classifier = nn.Sequential(OrderedDict([
            ("dropout1", nn.Dropout()),
            ("fc1", nn.Linear(256 * 6 * 6, 4096)),
            ("relu6", nn.ReLU(inplace=True)),
            ("dropout2", nn.Dropout()),
            ("fc2", nn.Linear(4096, 4096)),
            ("relu7", nn.ReLU(inplace=True)),
            ("fc3", nn.Linear(4096, classes)),
        ]))

    def forward(self, x):
        out = self.features(x)
        out = self.avgpool(out)
        out = torch.flatten(out, 1)  # keep batch dim, flatten the rest
        return self.classifier(out)


if __name__ == "__main__":
    # method = "Sequential"
    # method = "ModuleList"
    # method = "ModuleDict"
    method = "AlexNet"

    if method == "Sequential":
        # net = LeNetSequential(classes=2)
        net = LeNetSequentialOrderDict(classes=2)

        fake_img = torch.randn((4, 3, 32, 32), dtype=torch.float32)

        output = net(fake_img)

        print(net)
        print(output)

    if method == "ModuleList":
        net = ModuleList()
        print(net)
        fake_data = torch.ones(10, 10)
        output = net(fake_data)
        print(output)

    if method == "ModuleDict":
        net = ModuleDict()
        print(net)
        fake_img = torch.randn((4, 10, 32, 32))
        output = net(fake_img, 'conv', 'relu')

    if method == "AlexNet":
        # alexnet = torchvision.models.AlexNet()
        alexnet = MyAlexNet(classes=2)
        print(alexnet._modules['features']._modules.keys())