# Build a basic convolutional neural network (CNN)
import torch

# --- Demo 1: Conv2d on a random batch ---------------------------------
# Channel counts: 5 input channels -> 10 output channels.
in_channels, out_channels = 5, 10
# Spatial size of the input feature map (height x width).
width, height = 100, 100
# Convolution kernel is 3x3.
kernel_size = 3
# One sample per batch.
batch_size = 1

# Random input of shape (batch, channels, H, W).
# NOTE: renamed from `input` to avoid shadowing the builtin of the same name.
input_tensor = torch.randn(batch_size,
                           in_channels,
                           width,
                           height)

# Convolution layer: stride 1, no padding, so each spatial dimension
# shrinks by kernel_size - 1 (100 -> 98).
conv_layer = torch.nn.Conv2d(in_channels,
                             out_channels,
                             kernel_size=kernel_size)
# Single forward pass (inference only — no training happens here).
output = conv_layer(input_tensor)
print('输入大小', input_tensor.shape)
print('输出大小', output.shape)
print('卷积核维度', conv_layer.weight.shape)

# --- Demo 2: Conv2d with a hand-set kernel ----------------------------
# 5x5 input image, given row-major as a flat list.
# NOTE: renamed from `input` to avoid shadowing the builtin of the same name.
image = [3, 4, 6, 5, 7,
         2, 4, 6, 8, 2,
         1, 6, 7, 8, 4,
         9, 7, 4, 6, 2,
         3, 7, 5, 4, 1]

# Reshape to (batch=1, channels=1, H=5, W=5).
image = torch.Tensor(image).view(1, 1, 5, 5)
# 2D convolution: 1 input channel, 1 output channel, 3x3 kernel,
# padding=1 keeps the output 5x5, bias disabled so the result is the
# pure cross-correlation of the image with the kernel below.
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)
# Fixed kernel weights, shape (out_channels=1, in_channels=1, 3, 3).
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
# Overwrite the randomly initialized weights with the fixed kernel so
# the output is deterministic.
conv_layer.weight.data = kernel.data
output = conv_layer(image)
# Convolution result (still 5x5 thanks to the padding).
print(output)

# --- Demo 3: 2x2 max pooling ------------------------------------------
# 4x4 input, given row-major as a flat list.
# NOTE: renamed from `input` to avoid shadowing the builtin of the same name.
pool_input = [3, 4, 6, 5,
              2, 4, 6, 8,
              1, 6, 7, 8,
              9, 7, 4, 6]
# Reshape to (batch=1, channels=1, H=4, W=4).
pool_input = torch.Tensor(pool_input).view(1, 1, 4, 4)
# MaxPool2d with kernel_size=2 uses stride=2 by default, so each
# non-overlapping 2x2 window is reduced to its maximum (4x4 -> 2x2).
maxpooling_layer = torch.nn.MaxPool2d(kernel_size=2)
output = maxpooling_layer(pool_input)
# Pooled result.
print(output)
