import os

import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Pick the compute device: CUDA when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"使用设备: {device}")

# Training-time augmentation: random horizontal flip plus a padded random
# 32x32 crop, then conversion to a tensor.
train_transform = torchvision.transforms.Compose([
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.RandomCrop(32, padding=4),
    torchvision.transforms.ToTensor(),
])

# Download the CIFAR-10 splits; the test split gets no augmentation,
# only tensor conversion.
train_data = torchvision.datasets.CIFAR10(
    root='./data', train=True, transform=train_transform, download=True)
test_data = torchvision.datasets.CIFAR10(
    root='./data', train=False,
    transform=torchvision.transforms.ToTensor(), download=True)

train_data_size = len(train_data)
test_data_size = len(test_data)

print(f"训练集数据长度: {train_data_size}")
print(f"测试集数据长度: {test_data_size}")

# Wrap the datasets in batched loaders.
train_dataloader = DataLoader(train_data, batch_size=128)
test_dataloader = DataLoader(test_data, batch_size=128)

# 定义神经网络模型
class Tudui(nn.Module):
    """Small CNN classifier for 32x32 RGB images (CIFAR-10 style).

    Three conv/BatchNorm/ReLU + max-pool stages followed by a
    dropout-regularised two-layer fully connected head.

    Input:  tensor of shape (N, 3, 32, 32).
    Output: logits of shape (N, num_classes).

    Args:
        num_classes: number of output classes. Defaults to 10 (CIFAR-10),
            keeping the original behaviour.
    """

    def __init__(self, num_classes: int = 10):
        super(Tudui, self).__init__()
        self.module = nn.Sequential(
            # Stage 1: 3 -> 32 channels; padding=2 preserves 32x32.
            nn.Conv2d(3, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),

            nn.MaxPool2d(2),  # 32x32 -> 16x16
            # Stage 2: 32 -> 32 channels.
            nn.Conv2d(32, 32, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(32),
            nn.ReLU(),

            nn.MaxPool2d(2),  # 16x16 -> 8x8
            # Stage 3: 32 -> 64 channels.
            nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),

            nn.MaxPool2d(2),  # 8x8 -> 4x4
            nn.Flatten(),
            # 64 channels * 4 * 4 spatial positions after the three pools.
            nn.Linear(in_features=64 * 4 * 4, out_features=64),
            nn.ReLU(),
            nn.Dropout(0.5),

            nn.Linear(in_features=64, out_features=num_classes)
        )

    def forward(self, x):
        """Return class logits for a batch of (N, 3, 32, 32) images."""
        return self.module(x)


# Build the network and move it onto the selected device.
tudui = Tudui().to(device)
print(tudui)

# Cross-entropy loss for multi-class classification.
loss_fn = nn.CrossEntropyLoss().to(device)

# Adam optimiser over all model parameters.
optim = torch.optim.Adam(tudui.parameters(), lr=0.001)

# Halve the learning rate whenever the monitored loss has not improved
# for `patience` consecutive epochs.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optim, mode='min', factor=0.5, patience=3)

# Global step counters used as TensorBoard x-axes.
train_step_total = 0
test_step_total = 0

# Number of full passes over the training set.
epochs = 30

# TensorBoard writer; event files land in ./log.
writer = SummaryWriter("log")

for epoch in range(epochs):
    print(f"第 {epoch} 轮 开始训练")

    # ---- Training phase ----
    tudui.train()
    for imgs, targets in train_dataloader:
        imgs, targets = imgs.to(device), targets.to(device)

        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # Standard update: clear stale gradients, backprop, apply step.
        optim.zero_grad()
        loss.backward()
        optim.step()

        train_step_total += 1
        if train_step_total % 100 == 0:
            print(f"训练次数 {train_step_total}")
            writer.add_scalar("loss", loss.item(), train_step_total)

    # ---- Evaluation phase (no gradients, eval-mode BN/Dropout) ----
    tudui.eval()
    test_loss_total = 0.0
    accuracy_total = 0
    with torch.no_grad():
        for imgs, targets in test_dataloader:
            imgs, targets = imgs.to(device), targets.to(device)

            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            test_loss_total += loss.item()

            # Count correct top-1 predictions; .item() keeps the counter a
            # plain Python int rather than a 0-dim tensor.
            outputs_argmax = torch.argmax(outputs, dim=1)
            accuracy_total += (outputs_argmax == targets).sum().item()

    # BUG FIX: the original printed accuracy_total under the "Loss" label.
    print(f"整体测试集上的Loss: {test_loss_total}")
    print(f"整体测试集上的正确率: {accuracy_total / test_data_size}")

    writer.add_scalar("test_loss", test_loss_total, test_step_total)
    writer.add_scalar("test_accuracy", accuracy_total / test_data_size, test_step_total)
    test_step_total += 1

    # Make sure the checkpoint directory exists — torch.save does not
    # create missing parent directories.
    os.makedirs("./models", exist_ok=True)
    torch.save(tudui, f"./models/tudui_{epoch}.pth")
    print("模型已保存")

    # BUG FIX: the original fed `test_step_total / test_data_size` (an epoch
    # counter divided by the dataset size) to the scheduler, so the plateau
    # detection never saw the actual loss. Use the mean per-batch test loss.
    avg_test_loss = test_loss_total / len(test_dataloader)
    scheduler.step(avg_test_loss)

    print(f"Epoch {epoch + 1}: 当前学习率 = {optim.param_groups[0]['lr']}")

writer.close()



