# -*- coding:utf-8 -*-
"""
# @file name    : train_lenet.py
# @author       : QuZhang
# @date         : 2020-12-1 22:22
# @brief        : 人民币分类训练
"""

import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import torchvision.transforms as transforms
from tools.my_dataset import RMBDataset
from torch.utils.data import DataLoader
from model.lenet import LeNet
import torch.nn as nn
import torch.optim as optim
import torch
import numpy as np
import matplotlib.pyplot as plt


# Sanity checks: verify that the model and tools source files exist at the
# expected locations (two levels up from this script) before training starts.
path_lenet = os.path.abspath(os.path.join(BASE_DIR, '..', '..', 'model', "lenet.py"))
path_tools = os.path.abspath(os.path.join(BASE_DIR, '..', '..', 'tools', "common_tools.py"))
assert os.path.exists(path_lenet), "{}不存在,请将lenet.py文件放到{}".format(path_lenet, os.path.dirname(path_lenet))
assert os.path.exists(path_tools), "{}不存在,请将common_tools.py文件放到{}".format(path_tools, os.path.dirname(path_tools))

import sys
# Add the project root (two directory levels up) to sys.path so that the
# `tools` and `model` packages resolve as absolute imports.
# NOTE(review): `tools.my_dataset` and `model.lenet` are imported at the top
# of this file, *before* this sys.path entry is appended — those earlier
# imports only succeed if the packages are already resolvable from the
# launch directory; confirm the intended working directory.
hello_pytorch_DIR = os.path.abspath(os.path.dirname(__file__)+os.path.sep+".."+os.path.sep+"..")
sys.path.append(hello_pytorch_DIR)

from tools.common_tools import set_seed
set_seed()  # fix the random seed for reproducibility

# 设置数据标签
# Label mapping: denomination string -> class index
# (presumably "1" = 1-yuan note, "100" = 100-yuan note — per the file header)
rmb_label = {"1": 0, "100": 1}
# Hyper-parameters
MAX_EPOCH = 10  # number of training epochs
BATCH_SIZE = 16  # samples per mini-batch
LR = 0.01  # initial learning rate for SGD
log_interval = 10  # print training stats every `log_interval` iterations (depends on dataset size / batch size)
val_interval = 1  # run validation every `val_interval` epochs


if __name__ == '__main__':
    # =============== step 1/5 data ==================
    # 1.1 Resolve dataset directories
    split_dir = os.path.abspath(os.path.join(BASE_DIR, "..", '..', "data", "rmb_split"))
    if not os.path.exists(split_dir):
        raise Exception(r"数据 {} 不存在,回到less-06\lesson-06-1_split_dataset.py生成数据".format(split_dir))
    train_dir = os.path.join(split_dir, "train")
    valid_dir = os.path.join(split_dir, "valid")

    # 1.2 Pre-processing pipelines
    # Per-channel statistics used to standardize pixel values, which speeds up
    # convergence. NOTE(review): these are the standard ImageNet mean/std —
    # presumably a reasonable stand-in for this dataset; confirm if retraining.
    norm_mean = [0.485, 0.456, 0.406]  # per-channel pixel mean
    norm_std = [0.229, 0.224, 0.225]  # per-channel pixel std
    train_transform = transforms.Compose(
        [
            transforms.Resize((32, 32)),
            transforms.RandomCrop(32, padding=4),  # augmentation: pad by 4, then random 32x32 crop
            transforms.ToTensor(),
            transforms.Normalize(norm_mean, norm_std),
        ]
    )
    # Validation pipeline: no augmentation — only resize + normalize.
    valid_transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    # 1.3 Build Dataset instances that index image paths and labels
    train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
    valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

    # 1.4 Build DataLoaders for batched reading
    train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)  # shuffle: reshuffle data each epoch
    valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

    # --------------- step 2/5 model ---------------
    net = LeNet(classes=2)  # binary classifier (see rmb_label)
    net.initialize_weights()

    # --------------- step 3/5 loss function --------------
    criterion = nn.CrossEntropyLoss()

    # =============== step 4/5 optimizer ===============
    optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)  # choose the optimizer
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)  # LR decay: x0.1 every 10 epochs

    # ================ step 5/5 training ===============
    train_curve = list()  # per-iteration training loss
    valid_curve = list()  # per-validation-pass mean loss

    for epoch in range(MAX_EPOCH):

        loss_mean = 0.
        correct = 0.
        total = 0.

        net.train()  # switch to training mode (affects e.g. dropout/batch-norm layers)
        for i, data in enumerate(train_loader):
            # Each iteration: DataLoader draws a batch of sample indices, then
            # RMBDataset loads and transforms the corresponding images.

            # 1. forward
            # inputs is a 4-D tensor: (batch, channel, height, width)
            inputs, labels = data
            outputs = net(inputs)  # predicted class scores

            # 2. backward
            optimizer.zero_grad()  # clear gradients accumulated from the previous step
            loss = criterion(outputs, labels)  # batch loss
            loss.backward()

            # 3. update weights
            optimizer.step()

            # 4. accumulate classification statistics
            _, predicted = torch.max(input=outputs.data, dim=1)  # arg-max over class dim = predicted label
            total += labels.size(0)  # number of samples seen so far this epoch
            correct += (predicted == labels).squeeze().sum().numpy()  # number classified correctly

            # record / print training progress
            loss_mean += loss.item()  # scalar value of this batch's loss
            train_curve.append(loss.item())
            if (i+1) % log_interval == 0:
                # e.g. 160 images at batch size 16 -> 10 iterations per epoch
                loss_mean = loss_mean / log_interval  # mean loss over the last `log_interval` batches
                print("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss:{:.4f} Acc:{:.2%}".format(
                    epoch, MAX_EPOCH, i+1, len(train_loader), loss_mean, correct / total
                ))
                loss_mean = 0.  # reset for the next logging window

        scheduler.step()  # advance the LR schedule once per epoch

        # validate the model
        if (epoch+1) % val_interval == 0:
            # run one validation pass every `val_interval` epochs

            correct_val = 0.
            total_val = 0.
            loss_val = 0.
            net.eval()  # switch to evaluation mode; net.train() re-enables training mode next epoch
            with torch.no_grad():  # no gradient tracking needed during validation
                for j, data in enumerate(valid_loader):
                    inputs, labels = data
                    outputs = net(inputs)
                    loss = criterion(outputs, labels)  # mean loss over this validation batch

                    _, predicted = torch.max(outputs, 1)
                    total_val += labels.size(0)
                    correct_val += (predicted == labels).squeeze().sum().numpy()

                    loss_val += loss.item()  # running sum of per-batch mean losses

                loss_val_epoch = loss_val / len(valid_loader)  # average of the per-batch mean losses
                valid_curve.append(loss_val_epoch)
                print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                    epoch, MAX_EPOCH, j+1, len(valid_loader), loss_val_epoch, correct_val/total_val))

    # Plot training loss (one point per iteration) against validation loss
    # (one point per validation pass, positioned at the iteration count where
    # that validation ran).
    train_x = range(len(train_curve))
    train_y = train_curve

    train_iters = len(train_loader)   # iterations per training epoch
    valid_x = np.arange(1, len(valid_curve)+1) * train_iters * val_interval  # x positions in iteration units
    valid_y = valid_curve

    plt.plot(train_x, train_y, label="Train")
    plt.plot(valid_x, valid_y, label="Valid")

    plt.legend(loc="upper right")
    plt.ylabel("loss value")
    plt.xlabel('Iteration')
    plt.show()

    # ================ inference ===============
    BASE_DIR = os.path.dirname(os.path.abspath(__file__))
    test_dir = os.path.join(BASE_DIR, "test_data")

    test_data = RMBDataset(data_dir=test_dir, transform=valid_transform)
    test_loader = DataLoader(dataset=test_data, batch_size=1)  # predict one image at a time

    for i, data in enumerate(test_loader):
        # forward
        inputs, labels = data
        outputs = net(inputs)
        _, predicted = torch.max(outputs.data, 1)

        # Map the predicted class index back to the denomination (see rmb_label).
        rmb = 1 if predicted.numpy()[0] == 0 else 100
        print("模型获取{}元".format(rmb))

