import torch
from torch import nn
# import torchvision
import matplotlib.pyplot as plt
# import torch.nn.functional as F


class cnnNet(nn.Module):
    """Convolutional binary classifier built from row-wise kernels.

    Every convolution uses a (1, k) kernel, so the net behaves like a 1-D
    CNN applied per row, until ``conv7``'s (2, 1) kernel collapses the two
    rows into one. The flattened features go through ``fc2`` and a sigmoid,
    yielding one score per sample.

    NOTE(review): the widths only line up when the input is shaped
    (N, 1, 2, W) with W roughly in [3782, 5381], so that ``fc2`` receives
    exactly 3 features — confirm against the actual callers.

    Several layers (``r3``, ``mp3``, ``mp4``, ``fc1``) are constructed but
    never used in ``forward``; they are kept so that parameter registration,
    weight-init RNG order, and checkpoint layout stay unchanged.
    """

    def __init__(self):
        super().__init__()

        # Stage 1: 1 -> 16 channels, dilated (1, 20) kernel, BN, ReLU, width/2 pool.
        self.conv1 = nn.Conv2d(1, 16, kernel_size=(1, 20), dilation=2)
        self.bn1 = nn.BatchNorm2d(16)  # eps/affine left at defaults (1e-05 / True)
        self.r1 = nn.ReLU()
        self.mp1 = nn.MaxPool2d(kernel_size=(1, 2))

        # Stage 2: 16 -> 32 channels, same dilated kernel, BN, width/2 pool, ReLU.
        self.conv2 = nn.Conv2d(16, 32, kernel_size=(1, 20), dilation=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.mp2 = nn.MaxPool2d(kernel_size=(1, 2))
        self.r2 = nn.ReLU()

        # Stage 3: 32 -> 32 channels. r3/mp3 are defined but skipped in forward.
        self.conv3 = nn.Conv2d(32, 32, kernel_size=(1, 20), dilation=2)
        self.bn3 = nn.BatchNorm2d(32)
        self.mp3 = nn.MaxPool2d(kernel_size=(1, 10))
        self.r3 = nn.ReLU()

        # Stage 4: strided (width/10) dilated conv. mp4 is defined but unused.
        self.conv4 = nn.Conv2d(32, 32, kernel_size=(1, 20), dilation=2, stride=(1, 10))
        self.bn4 = nn.BatchNorm2d(32)
        self.mp4 = nn.MaxPool2d(kernel_size=(1, 10))
        self.r4 = nn.ReLU()

        # Stage 5: short (1, 5) kernel, width stride 10.
        self.conv5 = nn.Conv2d(32, 32, kernel_size=(1, 5), stride=(1, 10), dilation=1)
        self.bn5 = nn.BatchNorm2d(32)
        self.r5 = nn.ReLU()

        # Stage 6: pointwise conv used purely as a width/4 subsampler.
        self.conv6 = nn.Conv2d(32, 32, kernel_size=(1, 1), stride=(1, 4))
        self.bn6 = nn.BatchNorm2d(32)
        self.r6 = nn.ReLU()

        # Stage 7: (2, 1) kernel merges the two rows; channels drop to 1.
        self.conv7 = nn.Conv2d(32, 1, kernel_size=(2, 1))
        self.bn7 = nn.BatchNorm2d(1)
        self.r7 = nn.ReLU()

        # Classifier head. fc1 is registered but bypassed in forward.
        self.fc1 = nn.Linear(6, 3)
        self.fc2 = nn.Linear(3, 1)
        self.out = nn.Sigmoid()

    def forward(self, x):
        """Run the conv stack, flatten, and map to a sigmoid score.

        ``r3``/``mp3`` after conv3, ``mp4`` after conv4, and ``fc1`` are
        deliberately not applied, mirroring the original pipeline.
        """
        stages = (
            (self.conv1, self.bn1, self.r1, self.mp1),
            (self.conv2, self.bn2, self.r2, self.mp2),
            (self.conv3, self.bn3),           # r3 / mp3 intentionally skipped
            (self.conv4, self.bn4, self.r4),  # mp4 intentionally skipped
            (self.conv5, self.bn5, self.r5),
            (self.conv6, self.bn6, self.r6),
            (self.conv7, self.bn7, self.r7),
        )
        for stage in stages:
            for layer in stage:
                x = layer(x)

        flat = x.view(x.size(0), -1)  # expected to be (N, 3) for fc2
        return self.out(self.fc2(flat))


if __name__ == "__main__":
    # Smoke-test the model with an input that actually fits the network.
    #
    # The previous input, torch.randn(1, 10, 900, 17), crashed: conv1 is
    # declared with in_channels=1 (not 10), and a width of 17 is smaller
    # than conv1's effective dilated kernel width (20 - 1) * 2 + 1 = 39.
    #
    # A (N, 1, 2, 3800) tensor flows through the conv stack as
    # 3800 -> 3762 -> 1881 -> 1843 -> 921 -> 883 -> 85 -> 9 -> 3 in width,
    # and conv7's (2, 1) kernel collapses height 2 -> 1, so the flattened
    # features are exactly the 3 that fc2 = Linear(3, 1) expects.
    model = cnnNet()
    a = torch.randn(1, 1, 2, 3800)
    b = model(a)
    print(a.shape)  # torch.Size([1, 1, 2, 3800])
    print(b.shape)  # torch.Size([1, 1]) — one sigmoid score per sample
