import torch
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from dataset import *
from model import *

# TensorBoard writer for visualizing training curves
writer = SummaryWriter("./logs")

# Select the compute device (GPU if available, otherwise CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Root directory of the MNIST-jpg dataset
data_root = "../mnist_jpg"

# Preprocessing pipeline: resize to 28x28, convert to tensor,
# then normalize each pixel into the [-1, 1] range
transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((28, 28)),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize((0.5,), (0.5,))
])

# Load the train/test splits (MyDataReader is project-local; see dataset.py)
train_data = MyDataReader(root_path=data_root, is_train=True, transforms=transform)
test_data = MyDataReader(root_path=data_root, is_train=False, transforms=transform)

# Report the sizes of the two splits
train_data_size = len(train_data)
test_data_size = len(test_data)
print(f"训练集的规模为：{train_data_size}")
print(f"测试集的规模为：{test_data_size}")

# Mini-batch size for both loaders
batch_size = 64

# Wrap the datasets in DataLoaders.
# Fix: shuffle the training set each epoch — without it, batches arrive in a
# fixed (often class-sorted) order, which degrades SGD convergence.
train_dataloader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(dataset=test_data, batch_size=batch_size)

# Instantiate the network and move it onto the selected device
model = MyModel().to(device)

# Cross-entropy loss for multi-class classification
loss_fn = nn.CrossEntropyLoss().to(device)

# Plain SGD optimizer over all model parameters
learning_rate = 1e-2
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Number of epochs to run and a running counter of training steps
epoch = 50
total_train_step = 0

# ---- training / evaluation loop ----
# Track the best test accuracy ACROSS epochs.
# Fix: the original re-initialized this inside the loop every epoch, so the
# "save best model" check below passed on (almost) every epoch and the
# checkpoint was merely the latest model, not the best one.
best_test_accuracy_num = 0
for epoch in range(epoch):
    print(f"----------第{epoch + 1}轮训练开始----------")

    # --- training phase ---
    model.train()
    for data in train_dataloader:
        images, targets = data
        images = images.to(device)
        targets = targets.to(device)
        outputs = model(images)
        loss = loss_fn(outputs, targets)

        # Standard backprop step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Count total training steps across all epochs
        total_train_step = total_train_step + 1

        # Log every 100 steps.
        # Fix: the original tested (step + 1) % 100, firing on steps 99, 199, ...
        # while printing 100, 200, ..., and logged a fractional TensorBoard step.
        if total_train_step % 100 == 0:
            print(f"第{total_train_step}次训练的Loss：{loss.item()}")
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # --- evaluation phase ---
    total_test_loss = 0
    total_test_accuracy_num = 0
    model.eval()
    with torch.no_grad():
        for data in test_dataloader:
            images, targets = data
            images = images.to(device)
            targets = targets.to(device)
            outputs = model(images)
            loss = loss_fn(outputs, targets)
            # Number of correct top-1 predictions in this batch
            accuracy_num = (outputs.argmax(1) == targets).sum().item()

            # Accumulate the per-batch mean loss and the correct-prediction count
            total_test_loss = total_test_loss + loss.item()
            total_test_accuracy_num = total_test_accuracy_num + accuracy_num

    # Average test loss per batch.
    # Fix: the original divided the sum of batch-MEAN losses by the number of
    # SAMPLES, underestimating the loss by a factor of ~batch_size.
    avg_test_loss = total_test_loss / len(test_dataloader)

    # Overall classification accuracy on the test split
    total_test_accuracy = total_test_accuracy_num / test_data_size

    print(f"在测试集上的平均损失值Loss：{avg_test_loss}")
    print(f"在测试集上的正确率accuracy：{total_test_accuracy}")
    writer.add_scalar("test_loss", avg_test_loss, epoch + 1)
    writer.add_scalar("test_accuracy", total_test_accuracy, epoch + 1)

    # Checkpoint whenever the model beats its best accuracy seen so far.
    # NOTE(review): this pickles the whole module; consider saving
    # model.state_dict() instead, kept as-is so existing loaders still work.
    if total_test_accuracy_num > best_test_accuracy_num:
        best_test_accuracy_num = total_test_accuracy_num
        torch.save(model, "best_model.pth")

writer.close()

