import torch.utils.data
import torchvision.datasets
from torch.utils.tensorboard import SummaryWriter
import time
import ssl

from model import AdamModel

# Work around SSL certificate verification failures when downloading CIFAR-10.
ssl._create_default_https_context = ssl._create_unverified_context

# Use the first GPU when CUDA is available; otherwise fall back to the CPU so
# the script also runs on machines without a GPU (the original hard-coded
# "cuda:0" and crashed on CPU-only hosts).
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Build the CIFAR-10 datasets; ToTensor() converts PIL images to float tensors.
train_data = torchvision.datasets.CIFAR10(root="../datasets", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(root="../datasets", train=False, transform=torchvision.transforms.ToTensor(), download=True)
# Mini-batch loaders for training and evaluation.
train_dataloader = torch.utils.data.DataLoader(dataset=train_data, batch_size=64)
test_dataloader = torch.utils.data.DataLoader(dataset=test_data, batch_size=64)

print("训练数据集长度: {}".format(len(train_data)))
print("测试数据集长度: {}".format(len(test_data)))

model = AdamModel()
model.to(device)

# Classification task -> cross-entropy loss.
loss_function = torch.nn.CrossEntropyLoss()
loss_function.to(device)

# Optimizer: plain stochastic gradient descent, learning rate 1e-2 = 0.01.
learning_rate = 1e-2
optimizer = torch.optim.SGD(params=model.parameters(), lr=learning_rate)

# TensorBoard logging.
writer = SummaryWriter(log_dir="../logs")

train_step = 0  # optimizer steps taken so far (x-axis for train_loss)
test_step = 0   # completed evaluation passes (x-axis for test metrics)
epoch = 10      # total number of training epochs
start_time = time.time()

for i in range(epoch):
    print("--------- 第 {} 次训练开始 ------------".format(i + 1))
    total_train_loss = 0.0
    model.train()  # enable training-mode behavior (dropout, batch-norm updates)
    for imgs, targets in train_dataloader:
        imgs = imgs.to(device)
        targets = targets.to(device)
        output = model(imgs)
        # Gap between prediction and ground truth.
        loss = loss_function(output, targets)

        optimizer.zero_grad()  # clear gradients left from the previous step
        loss.backward()        # back-propagate: populate parameter gradients
        optimizer.step()       # apply the update to the model's weights
        train_step += 1
        # .item() extracts a detached Python float; accumulating the tensor
        # itself (as the original did) keeps every batch's autograd graph
        # alive for the whole epoch and leaks GPU memory.
        total_train_loss += loss.item()
        if train_step % 100 == 0:
            print("\t 训练次数: {}, loss: {}".format(train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), train_step)
    print("\t 训练数据集的总loss: {}".format(total_train_loss))
    end_time = time.time()
    # Report elapsed wall-clock time and the epoch just completed. The
    # original passed the constant total `epoch` here, printing "10" every
    # time; i + 1 is the epoch this line actually describes.
    print(" \t 训练数据集的time:{}, epoch: {}".format((end_time - start_time), i + 1))

    total_test_loss = 0.0
    total_accuracy = 0  # count of correctly classified test samples
    # Evaluation pass: no parameter updates, no gradient bookkeeping.
    model.eval()  # switch to inference-mode behavior
    with torch.no_grad():
        for imgs, target in test_dataloader:
            imgs = imgs.to(device)
            target = target.to(device)
            outputs = model(imgs)
            loss = loss_function(outputs, target)
            total_test_loss += loss.item()  # .item(): see training loop above
            # argmax over dim 1 picks the highest-scoring class per sample,
            # e.g. [0.1, 0.3, 0.04, 0.5] -> index 3.
            predicts = outputs.argmax(1)
            # (predicts == target) is a boolean tensor; .sum() counts the
            # True entries (False counts as 0, True as 1).
            total_accuracy += (predicts == target).sum().item()
        test_step += 1
        total_accuracy_rate = total_accuracy / len(test_data)
        print("\t 测试数据集上的loss: {}".format(total_test_loss))
        print("\t 测试数据集上的正确率: {}".format(total_accuracy_rate))
        writer.add_scalar("test_loss", total_test_loss, test_step)
        writer.add_scalar("test_accuracy", total_accuracy_rate, test_step)

    # NOTE(review): this saves the whole pickled model object; saving
    # model.state_dict() is the recommended, more portable form — kept as-is
    # to preserve the existing checkpoint format for downstream loaders.
    torch.save(model, "adam_model_{}.ptf".format(i + 1))
    print("模型已经保存!")

writer.close()  # flush pending events so TensorBoard sees the full run

