# -*- coding: utf-8 -*-
# @Time : 2022/2/18 13:31
# @File : train.py
# @Software : PyCharm
import time

import config
import os
import torch
from data_set import get_train_dataset,get_test_dataset, get_transform
from model import Net
import torch.nn.functional as F


# Use CUDA when a GPU is available; model and tensors are moved onto it via .to(DEVICE).
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def train_model(ui):
    """Train a ``Net`` on the training set, stream progress to the UI,
    evaluate on the test set, and persist the trained weights.

    Args:
        ui: UI object exposing ``printf(str)`` and a Qt ``progressBar``.

    Returns:
        The trained ``Net`` instance.
    """
    ui.progressBar.setHidden(False)
    all_start = time.time()
    train_loader = get_train_dataset(batch_size=config.BATCH_SIZE)
    net = Net().to(DEVICE)
    net.train()  # ensure training-mode behaviour (dropout/batch-norm)
    # Total optimizer steps across all epochs, used to scale the progress bar.
    total_steps = config.EPOCHS * len(train_loader)
    # Adam optimizer
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
    done_steps = 0
    for epoch in range(config.EPOCHS):
        mes = "\n------------------------第 " + str(epoch) + " 波训练------------------------\n\n"
        ui.printf(mes)
        for step, (x, y) in enumerate(train_loader):
            start = time.time()
            x, y = x.to(DEVICE), y.to(DEVICE)
            output = net(x)
            # Negative log-likelihood loss (model is expected to emit log-probabilities).
            loss = F.nll_loss(output, y)
            # PyTorch accumulates gradients, so clear them before backprop.
            optimizer.zero_grad()
            # Backpropagation
            loss.backward()
            # Apply the Adam gradient update
            optimizer.step()
            end = time.time()

            done_steps += 1
            if (step + 1) % 3 == 0:
                mes = '训练合集: {} [{}/{} ({:.0f}%)]\t损失函数: {:.6f}\t用时 ：{:.3f} s'.format(
                    epoch + 1, step * len(x), len(train_loader.dataset),
                    100. * step / len(train_loader), loss.item(), (end - start))
                ui.printf(mes)
                # Whole-run percentage. Multiply before dividing instead of the
                # original nested division, which was harder to read and would
                # raise ZeroDivisionError for zero completed steps.
                ui.progressBar.setValue(int(100 * done_steps / total_steps))

    all_end = time.time()
    ui.printf("\n进入测试...\n")
    # Evaluate on the held-out test set.
    mes = test(net, get_test_dataset(batch_size=config.BATCH_SIZE))
    ui.printf(mes + ",总训练时长 ：{:.3f} s".format((all_end - all_start)))
    # Save model weights; create the target directory if it does not exist yet.
    os.makedirs(config.DATA_MODEL, exist_ok=True)
    torch.save(net.state_dict(), os.path.join(config.DATA_MODEL, config.DEFAULT_MODEL))

    ui.progressBar.setHidden(True)
    return net


def test(model, test_loader):
    """Evaluate *model* on *test_loader* and return a summary message.

    Accumulates the summed NLL loss and the number of correct top-1
    predictions over the whole test set, then reports the mean loss
    and the accuracy as a formatted string.
    """
    model.eval()
    total_loss = 0.0  # summed loss over every sample
    n_correct = 0     # count of correct top-1 predictions
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs = inputs.to(DEVICE)
            targets = targets.to(DEVICE)
            logits = model(inputs)
            total_loss += F.nll_loss(logits, targets, reduction='sum').item()
            predictions = logits.max(1, keepdim=True)[1]
            n_correct += predictions.eq(targets.view_as(predictions)).sum().item()

    n_samples = len(test_loader.dataset)
    avg_loss = total_loss / n_samples
    accuracy = float(n_correct) / n_samples
    return '\n测试集损失函数={:.4f}, 精确度={:.4f}'.format(avg_loss, accuracy)


# Load the persisted model and predict the class of a single image.
def predict_model(image):
    """Classify one image with the saved model weights.

    Args:
        image: input image, preprocessed here with the same transform
            pipeline that was used during training.

    Returns:
        0 when the best log-probability falls below the confidence
        threshold, otherwise the predicted class index plus one.
    """
    data_transform = get_transform()
    # Preprocess the image exactly as during training.
    image = data_transform(image)
    # NOTE(review): assumes a 3x32x32 input (CIFAR-sized) — confirm against get_transform.
    image = image.view(-1, 3, 32, 32)
    net = Net().to(DEVICE)
    # map_location lets weights saved on a GPU machine load on a CPU-only host.
    net.load_state_dict(
        torch.load(os.path.join(config.DATA_MODEL, config.DEFAULT_MODEL),
                   map_location=DEVICE))
    net.eval()  # inference mode: disable dropout/batch-norm training behaviour
    with torch.no_grad():  # no gradients needed for a forward-only pass
        output = net(image.to(DEVICE))
    # Best log-probability and its class index ('best_score' avoids shadowing builtin max).
    best_score = torch.max(output).item()
    pred = output.max(1, keepdim=True)[1]
    # Treat a low-confidence prediction as "no match" (class 0).
    if best_score < -0.001:
        return 0

    return pred.item() + 1
