
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets
from tqdm import tqdm

from data.dataloader import data_set
from main.repository import resnet
from main.utils import optimizerspec
from main.utils.pathmanager import build_model_path


def goto_look_picture():
    """Train a DQN-style ResNet classifier on CIFAR-10 for 5 epochs.

    Logs the average training loss to TensorBoard every 100 batches and,
    when training completes, saves the model's state dict to ``DQN.pth``
    via the project's model-path helper.
    """
    # Dataset factory wrapping torchvision's CIFAR-10.
    cifar10 = data_set(datasets.CIFAR10)

    # Model: ResNet-style network, 3 input channels, 10 output classes.
    model = resnet.DQN(in_channels=3, block=resnet.BasicBlock, num_actions=10)

    # Training-set loader (shuffled).
    trainloader = cifar10(train=True, shuffle=True)

    # Move the model onto the GPU; the helper returns the device used.
    _device = model.to_gpu()

    # Cross-entropy loss for 10-class classification.
    criterion = nn.CrossEntropyLoss()
    criterion.to(_device)

    # Optimizer from the project's factory.
    # NOTE(review): the factory is named ``sdg`` in the project API
    # (likely a typo for "sgd") — kept as-is to match the module.
    optimizer = optimizerspec.sdg(model)

    # TensorBoard writer; closed in the ``finally`` below so event files
    # are flushed even if training raises.
    writer = SummaryWriter(log_dir='./logs/cifar10_experiment1')
    try:
        for epoch in range(5):
            # Loss accumulated since the last TensorBoard write.
            running_loss = 0.0
            # tqdm wraps the loader to show a per-epoch progress bar.
            for i, data in enumerate(tqdm(trainloader, desc=f'Epoch {epoch + 1}'), 0):
                inputs, labels = data[0].to(_device), data[1].to(_device)

                # Standard training step: zero grads, forward, loss,
                # backward, parameter update.
                optimizer.zero_grad()
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                running_loss += loss.item()

                # Every 100 batches, log the mean loss to TensorBoard at a
                # global step of (epoch * batches_per_epoch + batch_index).
                if (i + 1) % 100 == 0:
                    avg_loss = running_loss / 100
                    writer.add_scalar('Training Loss', avg_loss,
                                      epoch * len(trainloader) + i)
                    running_loss = 0.0
    finally:
        # Flush and release the TensorBoard writer unconditionally.
        writer.close()

    print('Finished Training')
    # Persist only the learned parameters (state dict), not the full model.
    torch.save(model.state_dict(), build_model_path('DQN.pth'))


# Script entry point: run the CIFAR-10 training loop when executed directly.
if __name__ == '__main__':
    goto_look_picture()
