import torch.nn as nn


# Convolution block: two (Conv2d -> ReLU -> BatchNorm2d -> Dropout2d) stages.
class conv_block(nn.Module):
    """Basic two-stage 2-D convolution block.

    Args:
        ks: convolution kernel size (odd sizes preserve spatial dims).
        ch_in: number of input channels.
        ch_out: number of output channels.
    """
    def __init__(self, ks, ch_in, ch_out):
        super(conv_block, self).__init__()
        # padding=ks//2 gives "same" padding for any odd kernel size.
        # (The previous hard-coded padding=1 was only correct for ks=3 and
        # silently shrank/grew feature maps for other kernel sizes, which
        # would break downstream layers that assume a fixed spatial size.)
        pad = ks // 2
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=ks, stride=1, padding=pad, bias=True),  # 2-D conv extracts local image features
            nn.ReLU(inplace=True),  # ReLU activation
            nn.BatchNorm2d(ch_out),
            nn.Dropout2d(0.5),
            nn.Conv2d(ch_out, ch_out, kernel_size=ks, stride=1, padding=pad, bias=True),
            nn.ReLU(inplace=True),
            nn.BatchNorm2d(ch_out),
            nn.Dropout2d(0.5),
        )

    def forward(self, x):
        """Apply both conv stages; spatial size is preserved for odd ks."""
        return self.conv(x)


# Plain CNN classifier assembled from stacked conv_block stages.
class CNN(nn.Module):
    """Five conv_block stages with 2x2 max-pooling after the first four,
    followed by a small fully-connected head producing a single score.

    NOTE(review): the first Linear layer assumes a 256 x 14 x 14 feature map,
    i.e. inputs whose spatial size reduces to 14x14 after four poolings
    (e.g. 224x224) — confirm against the caller.
    """
    def __init__(self, kernel_size=3, in_ch=3):
        super(CNN, self).__init__()
        # Channel width per stage; widening the feature space helps the
        # network capture more local information.
        feats = [16, 32, 64, 128, 256]
        stages = [
            conv_block(kernel_size, c_in, c_out)
            for c_in, c_out in zip([in_ch] + feats[:-1], feats)
        ]
        # Assign to the original attribute names so registered submodules
        # (and hence state_dict keys) are unchanged.
        self.conv1, self.conv2, self.conv3, self.conv4, self.conv5 = stages
        # Fully-connected head: integrates the collected local/global
        # information into a single output score.
        self.fc = nn.Sequential(
            nn.Linear(feats[4] * 14 * 14, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU(),
            nn.Linear(32, 1),
        )
        self.maxpooling = nn.MaxPool2d(kernel_size=2)

    def forward(self, x):
        """Run the conv stages, flatten, and score with the FC head."""
        # Pool after each of the first four stages; the fifth stage is not pooled.
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = self.maxpooling(stage(x))
        x = self.conv5(x)
        # Flatten to (batch, features) so the Linear layers can apply their
        # matrix multiplication.
        x = x.view(x.size(0), -1)
        return self.fc(x)