#
# models for YRBASIN
#
###############################################################################
#
import torch
import torch.nn.functional as F
# torch.nn.functional.relu() is a function call, typically used inside forward().
# torch.nn.ReLU() is a module, typically used when declaring network layers.

"""
批规范化层 - Batch Normalization
随机失活层 - Dropout
"""

# (N, C)
# torch.nn.BatchNorm1d()

# (N, C, H, W)
# torch.nn.BatchNorm2d()

# (N, C, D, H, W)
# torch.nn.BatchNorm3d()


class CNNFeature12(torch.nn.Module):
    """CNN classifier for 12-channel 9x9 inputs.

    NOTE(review): unlike CNNFeature13, there is no ReLU between the two
    3x3 convolutions, so they compose into one linear map; inserting one
    would shift the Sequential indices (and invalidate saved checkpoints),
    so it is only flagged here.
    """

    def __init__(self, num_classes=5):
        """
        Args:
            num_classes: number of output classes (size of the logit vector).
        """
        super(CNNFeature12, self).__init__()
        # Input: [12, 9, 9]
        self.model = torch.nn.Sequential(
            torch.nn.Conv2d(12, 12, 1),         # [12, 9, 9]
            torch.nn.MaxPool2d(2, stride=1),    # [12, 8, 8]
            torch.nn.ReLU(),

            torch.nn.Conv2d(12, 32, 3, padding=1),  # [32, 8, 8]
            torch.nn.Conv2d(32, 64, 3, padding=1),  # [64, 8, 8]
            torch.nn.MaxPool2d(2),                  # [64, 4, 4]
            torch.nn.ReLU(),

            torch.nn.Flatten(),  # [1024]

            torch.nn.Linear(1024, 256),
            torch.nn.ReLU(),
            torch.nn.Linear(256, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, num_classes),
            # Trailing ReLU removed: it clamped the final logits at zero,
            # which discards negative class scores and breaks losses such as
            # CrossEntropyLoss; CNNFeature13/21 already end in a plain Linear.
        )

    def forward(self, input: torch.Tensor):
        """Map a [N, 12, 9, 9] batch to [N, num_classes] raw logits."""
        output = self.model(input)
        return output


class CNNFeature13(torch.nn.Module):
    """CNN classifier for 13-channel 9x9 inputs.

    Pipeline: 1x1 conv, overlapping max-pool, two 3x3 convs with a final
    max-pool, then three fully connected layers producing class logits.
    """

    def __init__(self, num_classes: int = 5):
        """
        Args:
            num_classes: number of output classes.
        """
        super(CNNFeature13, self).__init__()
        # Shape comments below assume a [13, 9, 9] input.
        layers = []
        # Conv block C1: channel mixing + overlapping pool.
        layers += [
            torch.nn.Conv2d(13, 13, 1),         # [13, 9, 9]
            torch.nn.MaxPool2d(2, stride=1),    # [13, 8, 8]
            torch.nn.ReLU(),
        ]
        # Conv blocks C2/C3: widen channels, then downsample.
        layers += [
            torch.nn.Conv2d(13, 32, 3, padding=1),  # [32, 8, 8]
            torch.nn.ReLU(),
            torch.nn.Conv2d(32, 64, 3, padding=1),  # [64, 8, 8]
            torch.nn.MaxPool2d(2),                  # [64, 4, 4]
            torch.nn.ReLU(),
        ]
        # Classifier head: flatten then FC1-FC3 down to the logits.
        layers += [
            torch.nn.Flatten(),                 # [1024] <= 64*4*4
            torch.nn.Linear(1024, 256),         # FC1
            torch.nn.ReLU(),
            torch.nn.Linear(256, 64),           # FC2
            torch.nn.ReLU(),
            torch.nn.Linear(64, num_classes),   # FC3, raw logits
        ]
        self.model = torch.nn.Sequential(*layers)

    def forward(self, input: torch.Tensor):
        """Map a [N, 13, 9, 9] batch to [N, num_classes] logits."""
        return self.model(input)


class CNNFeature21(torch.nn.Module):
    """CNN classifier for multi-channel 9x9 raster stacks (default 21 layers)."""

    def __init__(self, nLayers: int = 21, nClasses: int = 5):
        """
        Args:
            nLayers: number of input channels (training-set raster layers).
            nClasses: number of output classes.
        """
        super(CNNFeature21, self).__init__()
        nn = torch.nn  # local alias; shapes below assume a [nLayers, 9, 9] input
        self.model = nn.Sequential(
            # C1: 1x1 channel mixing, overlapping pool -> [nLayers, 8, 8]
            nn.Conv2d(nLayers, nLayers, 1),
            nn.MaxPool2d(2, stride=1),
            nn.ReLU(),
            # C2: widen to 32 channels -> [32, 8, 8]
            nn.Conv2d(nLayers, 32, 3, padding=1),
            nn.ReLU(),
            # C3: widen to 64 channels, then downsample -> [64, 4, 4]
            nn.Conv2d(32, 64, 3, padding=1),
            nn.MaxPool2d(2),
            nn.ReLU(),
            # Flatten 64*4*4 -> 1024, then the FC head down to nClasses logits.
            nn.Flatten(),
            nn.Linear(1024, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, nClasses),
        )

    def forward(self, input: torch.Tensor):
        """Map a [N, nLayers, 9, 9] batch to [N, nClasses] logits."""
        return self.model(input)


class CNNFeature12_bak(torch.nn.Module):
    """Backup variant of CNNFeature12 with a deeper fully connected head.

    NOTE(review): the head ends with a ReLU, so outputs are clamped to be
    non-negative — unsuitable as logits for CrossEntropyLoss. Left as-is
    because this looks like a retained backup of the original design.
    """

    def __init__(self):
        super(CNNFeature12_bak, self).__init__()

        # Shared activation applied between the conv stages in forward().
        self.relu = torch.nn.ReLU()

        # 1: [-1, 12, 9, 9] => [-1, 12, 8, 8]
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(12, 12, 1),
            torch.nn.MaxPool2d(kernel_size=2, stride=1),
        )

        # 2: [-1, 12, 8, 8] => [-1, 64, 4, 4]
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(12, 32, 3, padding=1),
            torch.nn.Conv2d(32, 64, 3, padding=1),
            torch.nn.MaxPool2d(2)
        )

        # 3: [-1, 64, 4, 4] => [-1, 64*4*4] = [-1, 1024]
        self.flatten = torch.nn.Flatten()

        # 4: fully connected head [-1, 1024] => [-1, 5]
        self.fc = torch.nn.Sequential(
            torch.nn.Linear(1024, 512),
            torch.nn.ReLU(),
            torch.nn.Linear(512, 256),
            torch.nn.ReLU(),
            torch.nn.Linear(256, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, 32),
            torch.nn.ReLU(),
            torch.nn.Linear(32, 5),
            torch.nn.ReLU()
        )

    def forward(self, x):
        """Map a [N, 12, 9, 9] batch to [N, 5] non-negative outputs."""
        # Removed: `in_size = torch.Tensor(x).size(0)` — it copied the whole
        # input tensor just to read the batch size, and the value was unused.
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.flatten(x)
        x = self.fc(x)
        return x


class AlexNet(torch.nn.Module):
    """AlexNet-style classifier for 3-channel images."""

    def __init__(self, num_classes=1000):
        """
        Args:
            num_classes: size of the output logit vector.
        """
        super(AlexNet, self).__init__()
        # Feature extractor: conv stages C1-C5 with interleaved max-pools.
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(3, 96, 11, 4),          # C1
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(3, 2),
            torch.nn.Conv2d(96, 256, 5, 1, 2),      # C2
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(3, 2),
            torch.nn.Conv2d(256, 384, 3, 1, 1),     # C3
            torch.nn.ReLU(),
            torch.nn.Conv2d(384, 384, 3, 1, 1),     # C4
            torch.nn.ReLU(),
            torch.nn.Conv2d(384, 256, 3, 1, 1),     # C5
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(3, 2)
        )
        # Forces a 6x6 spatial map regardless of the conv stack's output size,
        # so the FC head sees a fixed 256*6*6 feature vector.
        self.avgpool = torch.nn.AdaptiveAvgPool2d((6, 6))
        # Classifier head FC6-FC8 with dropout regularisation.
        self.fc = torch.nn.Sequential(
            torch.nn.Dropout(),
            torch.nn.Linear(256*6*6, 4096),         # FC6
            torch.nn.ReLU(inplace=True),
            torch.nn.Dropout(0.5),
            torch.nn.Linear(4096, 4096),            # FC7
            torch.nn.ReLU(),
            torch.nn.Linear(4096, num_classes),     # FC8
        )

    def forward(self, img: torch.Tensor):
        """Map a [N, 3, H, W] batch (H, W large enough for the conv stack)
        to [N, num_classes] logits."""
        pooled = self.avgpool(self.conv(img))       # [N, 256, 6, 6]
        flat = pooled.view(img.shape[0], -1)        # [N, 256*6*6]
        return self.fc(flat)


"""
16x16
"""


class ConvNet16(torch.nn.Module):
    """CNN classifier for 12-channel 16x16 inputs producing 5-way logits.

    NOTE(review): the five stacked convolutions have no activations between
    them, so (up to the max-pool) they compose into a single linear map.
    Inserting ReLUs would shift the Sequential indices and invalidate any
    saved checkpoints, so this is only flagged here.
    """

    def __init__(self):
        super(ConvNet16, self).__init__()
        # Input: [-1, 12, 16, 16]
        self.model = torch.nn.Sequential(
            torch.nn.Conv2d(12, 32, 3, padding=1),  # [32, 16, 16]
            torch.nn.Conv2d(32, 64, 3),     # [64, 14, 14]
            torch.nn.Conv2d(64, 64, 3),     # [64, 12, 12]
            torch.nn.Conv2d(64, 128, 3),    # [128, 10, 10]
            torch.nn.Conv2d(128, 64, 3),    # [64, 8, 8]
            torch.nn.MaxPool2d(2),          # [64, 4, 4]

            torch.nn.Flatten(),             # [1024]
            torch.nn.Linear(1024, 512),     # [512]
            torch.nn.ReLU(),
            torch.nn.Linear(512, 256),      # [256]
            torch.nn.ReLU(),
            torch.nn.Linear(256, 128),      # [128]
            torch.nn.ReLU(),
            torch.nn.Linear(128, 64),       # [64]
            torch.nn.ReLU(),
            torch.nn.Linear(64, 32),        # [32]
            torch.nn.ReLU(),
            torch.nn.Linear(32, 5),         # [5]
            # Trailing ReLU removed: it clamped the final logits at zero,
            # which discards negative class scores and breaks losses such as
            # CrossEntropyLoss (CNNFeature13/21 also end in a plain Linear).
        )

    def forward(self, x):
        """Map a [N, 12, 16, 16] batch to [N, 5] raw logits."""
        x = self.model(x)
        return x


"""
demo
"""


class _Net_(torch.nn.Module):
    def __init__(self):
        super(_Net_, self).__init__()
        self.model = torch.nn.Sequential(
            torch.nn.Conv2d(3, 32, 5, padding=2),
            torch.nn.MaxPool2d(2),
            torch.nn.Flatten(),
            torch.nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


class Net_Demo(torch.nn.Module):
    """Demo block: a 1x1 convolution followed by batch normalization."""

    def __init__(self):
        super(Net_Demo, self).__init__()
        # conv1: [-1, 12, H, W] => [-1, 12, H, W]; the 1x1 conv only mixes
        # channels, so the spatial size is unchanged.
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(12, 12, 1),
            torch.nn.BatchNorm2d(12),
        )

    def forward(self, x):
        """Apply conv1 to x and return the result."""
        return self.conv1(x)
