"""Basic end-to-end neural-network training workflow.

This script demonstrates the full training loop by training our
hand-written "Ray" model (defined in model.py) on the CIFAR10 dataset.
"""
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from model import *

#  Load CIFAR10: the train split is used for optimization, the test split
#  for per-epoch evaluation. ToTensor converts each PIL image to a float
#  tensor in [0, 1] with shape (3, 32, 32).
test_data = torchvision.datasets.CIFAR10(root='./data_train', train=False, transform=torchvision.transforms.ToTensor(),
                                         download=True)
train_data = torchvision.datasets.CIFAR10(root='./data_train', train=True, transform=torchvision.transforms.ToTensor(),
                                          download=True)
#  Train and test splits have different lengths; the training split is the
#  one that usually gets data augmentation.
train_data_size = len(train_data)
test_data_size = len(test_data)
#  Fix: the test-set size was computed but never reported.
print("训练数据集的长度是：{}".format(train_data_size))
print("测试数据集的长度是：{}".format(test_data_size))

#  Wrap the datasets in DataLoaders so training iterates over mini-batches of 64.
train_dataLoader = DataLoader(train_data, batch_size=64)
test_dataLoader = DataLoader(test_data, batch_size=64)

#  搭建神经网络
#  一般习惯讲神经网络单独成文件，方便测试 此处代码迁移至model.py
# class Ray(nn.Module):
#     def __init__(self):
#         super(Ray, self).__init__()
#         self.model = nn.Sequential(
#             #  神经网络序列，卷积，池化，展平，线性层
#             nn.Conv2d(3, 32, 5, 1, 2),
#             nn.MaxPool2d(2),
#             nn.Conv2d(32, 32, 5, 1, 2),
#             nn.MaxPool2d(2),
#             nn.Conv2d(32, 64, 5, 1, 2),
#             nn.MaxPool2d(2),
#             nn.Flatten(),
#             nn.Linear(1024, 64),
#             nn.Linear(64, 10)
#         )
#
#     def forward(self, x):
#         x = self.model(x)
#         return x

#  Instantiate the network (class Ray lives in model.py; keeping the model
#  in its own file makes it easy to unit-test separately).
ray = Ray()

#  Cross-entropy is the standard criterion for 10-way classification.
loss_fn = nn.CrossEntropyLoss()

#  Plain SGD with a fixed learning rate — no momentum, no scheduling.
learning_rate = 1e-2
optimizer = torch.optim.SGD(params=ray.parameters(), lr=learning_rate)

#  Global step counters for logging, plus the epoch budget.
train_step, test_step = 0, 0
epoch = 10

#  TensorBoard writer; loss curves land under ./train_logs.
writer = SummaryWriter(log_dir="./train_logs")
#  Main loop: one optimization pass over the train set and one full
#  evaluation pass over the test set per epoch.
for i in range(epoch):
    print("训练轮数:{}".format(i + 1))

    #  Bug fix: restore train mode at the start of every epoch. The original
    #  called ray.eval() after epoch 1 and never switched back, so all later
    #  epochs trained in eval mode (matters for dropout/batch-norm layers).
    ray.train()
    for data in train_dataLoader:
        imgs, targets = data
        outputs = ray(imgs)
        loss = loss_fn(outputs, targets)

        #  Standard update: clear stale gradients, backprop, apply the step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_step = train_step + 1
        if train_step % 100 == 0:
            print("训练次数:{},loss:{}".format(train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), train_step)

    #  Evaluation pass: eval mode, no gradient tracking.
    ray.eval()
    test_loss = 0
    test_accuracy = 0
    with torch.no_grad():
        for data in test_dataLoader:
            imgs, targets = data
            outputs = ray(imgs)
            loss = loss_fn(outputs, targets)
            test_loss = test_loss + loss.item()
            #  Count correct top-1 predictions in this batch; .item() keeps
            #  the accumulator a plain Python number instead of a tensor.
            accuracy = (outputs.argmax(1) == targets).sum()
            test_accuracy = test_accuracy + accuracy.item()

    #  Fix: test_loss was computed but never reported, and test_step was
    #  declared but never used — log both test metrics to TensorBoard to
    #  mirror the train_loss curve.
    test_step = test_step + 1
    print("整体测试集合上的loss：{}".format(test_loss))
    print("整体测试集合上的正确率：{}".format(test_accuracy / test_data_size))
    writer.add_scalar("test_loss", test_loss, test_step)
    writer.add_scalar("test_accuracy", test_accuracy / test_data_size, test_step)

    #  NOTE(review): torch.save on the whole module pickles the class itself;
    #  saving ray.state_dict() is the more portable convention. Kept as-is so
    #  any existing torch.load(...) callers keep working.
    torch.save(ray, "ray_{}.pth".format(i))
    print("模型已保存")
writer.close()