# 定义神经网络
import torch
from torch import nn


class NeuralNetwork(nn.Module):
    """CNN classifier: two conv+pool stages followed by a three-layer MLP head.

    NOTE: the flatten size hard-codes 3-channel inputs of spatial size
    300x300 — each 5x5 conv (no padding) removes 4 pixels and each 2x2
    max-pool halves the size, so 300 -> 296 -> 148 -> 144 -> 72, which
    yields the 64 * 72 * 72 features consumed by the first linear layer.
    Other input sizes will fail at the first ``Linear``.
    """

    def __init__(self, num_classes):
        """Build all layers.

        Args:
            num_classes: number of output classes (size of the last layer).
        """
        super().__init__()

        # First convolution stage: 3 -> 32 channels, 5x5 kernel.
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=5),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
        )

        # Halve the spatial resolution.
        # (Kept wrapped in Sequential so state_dict keys stay unchanged.)
        self.layer2 = nn.Sequential(
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Second convolution stage: 32 -> 64 channels, 5x5 kernel.
        self.layer3 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=5),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )

        # Halve the spatial resolution again.
        self.layer4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Classifier head: flattened conv features -> num_classes logits.
        # Dropout after each hidden layer regularizes the large Linear.
        self.fc = nn.Sequential(
            nn.Linear(64 * 72 * 72, 2048),  # 64 channels * 72 * 72 spatial (300x300 input)
            nn.ReLU(inplace=True),
            nn.Dropout(0.2),
            nn.Linear(2048, 128),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(128, num_classes),
        )

    def forward(self, x):
        """Run the forward pass.

        Args:
            x: input batch of shape (N, 3, 300, 300).

        Returns:
            Logits tensor of shape (N, num_classes).
        """
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # Flatten to (N, features) before the fully connected head.
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x