"""
池化层
"""
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# CIFAR10 test split as tensors; downloads into ../data on first run.
to_tensor = torchvision.transforms.ToTensor()
dataset = torchvision.datasets.CIFAR10(
    "../data",
    train=False,
    transform=to_tensor,
    download=True,
)
dataloader = DataLoader(dataset, batch_size=64)


# input = torch.tensor([
#     [1, 2, 0, 3, 1],
#     [0, 1, 2, 3, 1],
#     [1, 2, 1, 0, 0],
#     [5, 2, 3, 1, 1],
#     [2, 1, 0, 1, 1]
# ], dtype=torch.float32)
# input = torch.reshape(input, (-1, 1, 5, 5))
# print(input)


class Tudui(nn.Module):
    """Minimal module that applies a single 3x3 max-pool.

    Max pooling keeps the largest activation per window, reducing the
    amount of data while preserving the dominant features. With
    ceil_mode=True, windows that overhang the input edge are kept
    (partial windows still produce an output) instead of being dropped.
    """

    def __init__(self):
        super().__init__()
        # Fixed typo: "maxool1" -> "maxpool1". MaxPool2d has no learnable
        # parameters, so the rename cannot invalidate saved state dicts.
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, x):
        """Return x max-pooled per channel; assumes NCHW input — TODO confirm."""
        return self.maxpool1(x)


# Build the model and stream pooled CIFAR10 batches to TensorBoard.
tudui = Tudui()
print(tudui)

writer = SummaryWriter("../logs_maxpool")
for step, (imgs, targets) in enumerate(dataloader):
    writer.add_images("input", imgs, step)
    output = tudui(imgs)

    # Pooling shrinks H and W (32 -> 11 with kernel 3, ceil_mode=True)
    # but leaves the channel count at 3, so add_images accepts the
    # pooled batch directly — no reshape needed.
    print("output:", output.shape)
    print("imgs:", imgs.shape)

    writer.add_images("output", output, step)

writer.close()
