import torch
import torchvision
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

'''
1. Load the CIFAR-10 dataset and set up a DataLoader for batched reading.
2. Define a neural-network model (Module) with convolution, pooling, flatten and linear layers.
3. Train with the CrossEntropyLoss loss function and the SGD optimizer.
4. Iterate for 20 epochs, computing the loss and updating the model parameters.
'''
# NOTE(review): train=False loads the CIFAR-10 *test* split (10k images), yet the
# script below trains on it — presumably a small-scale demo; confirm this is
# intentional before real use.
dataset = torchvision.datasets.CIFAR10("../../data", train=False,
                                       transform=torchvision.transforms.ToTensor(), download=True)
# Batch the dataset 64 samples at a time (no shuffling, default options).
dataLoader = DataLoader(dataset,batch_size=64)

class Module(nn.Module):
    """CNN classifier for 32x32 RGB (CIFAR-10) images.

    Three conv(5x5, padding 2) + 2x2 max-pool stages (channels 3->32->32->64)
    reduce each image to a 64x4x4 feature map (= 1024 values), which two
    linear layers map down to 10 class scores.
    """

    def __init__(self):
        super().__init__()
        # Build the pipeline as a list and unpack it, so the Sequential
        # submodule indices (0..8) match the conventional layout.
        stages = [
            Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            MaxPool2d(kernel_size=2, ceil_mode=False),
            Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            MaxPool2d(kernel_size=2, ceil_mode=False),
            Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            MaxPool2d(kernel_size=2, ceil_mode=False),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        ]
        self.module1 = Sequential(*stages)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10)."""
        return self.module1(x)

module = Module()
print(module)
# Optimizer (plain SGD, lr=0.01) and classification loss.
optimizer = torch.optim.SGD(module.parameters(), lr=0.01)
loss_cross = nn.CrossEntropyLoss()
# Training: 20 full passes over the DataLoader.
for epoch in range(20):
    running_loss = 0.0  # fixed typo "runing_loss"; accumulate a float, not a tensor
    for imgs, targets in dataLoader:
        optimizer.zero_grad()
        output = module(imgs)
        loss = loss_cross(output, targets)
        loss.backward()
        optimizer.step()
        # .item() detaches the scalar from autograd; summing the raw tensor
        # (as the original did) keeps every batch's computation graph alive
        # and steadily leaks memory across the epoch.
        running_loss += loss.item()
    # Report once per epoch. The original printed the loss for every batch and
    # then, after the loop, only the *last* epoch's total (it was reset each epoch).
    print(f"epoch {epoch}: total loss = {running_loss}")