# -*- coding:utf-8 -*-
"""
# @file name    : create_module.py
# @author       : QuZhang
# @date         : 2020-12-13 9:03
# @brief        : 模型创建学习
"""
import os
# Directory containing this file; anchor for all relative paths below.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Work around the "duplicate OpenMP runtime" abort seen with some
# MKL/PyTorch builds (lets multiple libiomp copies coexist).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
import sys
# Project root (two levels up); appended to sys.path so the local
# `tools` and `model` packages import successfully below.
code_DIR = os.path.abspath(os.path.dirname(__file__)+os.path.sep+'..'+os.path.sep+'..')
sys.path.append(code_DIR)

# Fail fast with a helpful message if the expected project files are missing.
path_lenet = os.path.abspath(os.path.join(BASE_DIR, '..', '..', 'model', 'lenet.py'))
path_tools = os.path.abspath(os.path.join(BASE_DIR, '..', '..', "tools", 'common_tools.py'))
assert os.path.exists(path_lenet), "{} 不存在, 请将lenet.py文件放到{}下".format(path_lenet, os.path.dirname(path_lenet))
assert os.path.exists(path_tools), "{} 不存在, 请将common_tools.py放到{}下".format(path_tools, os.path.dirname(path_tools))
from tools.common_tools import set_seed
from torchvision.transforms import transforms
from tools.my_dataset import RMBDataset
from torch.utils.data import DataLoader
from model.lenet import LeNet
import torch.nn as nn
import torch.optim as optim
import torch
from matplotlib import pyplot as plt
import numpy as np

set_seed()  # fix random seeds for reproducibility (project helper)
# Class-label mapping: folder name -> class index
# (presumably RMB banknote denominations "1" and "100" — confirm against dataset).
rmb_label = {"1": 0, "100": 1}

# Hyperparameters
MAX_EPOCH = 10     # number of training epochs
BATCH_SIZE = 16    # samples per mini-batch
LR = 0.01          # initial learning rate for SGD
log_interval = 10  # print training stats every N iterations
val_interval = 1   # run validation every N epochs


if __name__ == '__main__':
    # ============= step 1/5 data ==============
    split_dir = os.path.abspath(os.path.join(BASE_DIR, '..', '..', 'data', 'rmb_split'))
    # (fixed duplicated "回到回到" in the original message)
    assert os.path.exists(split_dir), "数据{}不存在, 回到lesson-06\_split_dataset.py生成数据".format(split_dir)
    train_dir = os.path.join(split_dir, 'train')
    valid_dir = os.path.join(split_dir, 'valid')

    # ImageNet channel statistics used for input normalization.
    norm_mean = [0.485, 0.456, 0.406]
    norm_std = [0.229, 0.224, 0.225]

    # Training adds random-crop augmentation; validation is deterministic.
    train_transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.RandomCrop(32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    valid_transform = transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(norm_mean, norm_std),
    ])

    # Build datasets and their loaders (shuffle only the training set).
    train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
    valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

    train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
    valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

    # ================ step 2/5 model =============
    net = LeNet(classes=2)
    net.initialize_weights()

    # ============ step 3/5 loss function ==============
    criterion = nn.CrossEntropyLoss()

    # ============ step 4/5 optimizer ===============
    optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9)
    # Decay the learning rate by 10x every 10 epochs.
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # ============== step 5/5 training ==================
    train_curve = list()   # per-iteration training loss
    valid_curve = list()   # per-validation mean loss (one point per validated epoch)

    for epoch in range(MAX_EPOCH):
        loss_mean = 0.
        correct = 0.
        total = 0.

        net.train()  # training mode: weights are updated below
        for i, data in enumerate(train_loader):

            # forward
            inputs, labels = data
            outputs = net(inputs)

            # backward
            optimizer.zero_grad()
            loss = criterion(outputs, labels)
            loss.backward()

            # update weights
            optimizer.step()

            # accumulate classification statistics
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            # .item() extracts the Python scalar directly; the original's
            # .squeeze().sum().numpy() was an equivalent but roundabout form.
            correct += (predicted == labels).sum().item()

            # log training progress every `log_interval` iterations
            loss_mean += loss.item()
            train_curve.append(loss.item())
            if (i + 1) % log_interval == 0:
                loss_mean = loss_mean / log_interval
                print("Training:Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                    epoch, MAX_EPOCH, i + 1, len(train_loader), loss_mean, correct / total))
                loss_mean = 0.

        scheduler.step()  # advance the LR schedule once per epoch

        # validate the model
        if (epoch + 1) % val_interval == 0:
            correct_val = 0.
            total_val = 0.
            loss_val = 0.

            net.eval()  # eval mode; no_grad() below disables gradient tracking
            with torch.no_grad():
                for j, data in enumerate(valid_loader):
                    inputs, labels = data
                    outputs = net(inputs)
                    loss = criterion(outputs, labels)
                    _, predicted = torch.max(outputs.data, 1)
                    total_val += labels.size(0)
                    correct_val += (predicted == labels).sum().item()

                    loss_val += loss.item()

                # BUG FIX vs original: (1) record/print the *mean* validation
                # loss — the accumulated sum grows with the batch count and is
                # not comparable to the per-iteration training loss plotted
                # below; (2) report validation accuracy `correct_val/total_val`
                # — the original printed the training accuracy `correct/total`.
                loss_val_mean = loss_val / len(valid_loader)
                valid_curve.append(loss_val_mean)
                print("Valid:\t Epoch[{:0>3}/{:0>3}] Iteration[{:0>3}/{:0>3}] Loss: {:.4f} Acc:{:.2%}".format(
                    epoch, MAX_EPOCH, j + 1, len(valid_loader), loss_val_mean, correct_val / total_val))

    # Plot training loss per iteration and validation loss per epoch.
    train_x = range(len(train_curve))
    train_y = train_curve

    train_iters = len(train_loader)
    # valid_curve holds one point per validated epoch; scale its x-positions
    # onto the iteration axis so both curves share a common x-axis.
    valid_x = np.arange(1, len(valid_curve) + 1) * train_iters * val_interval
    valid_y = valid_curve

    plt.plot(train_x, train_y, label='Train')
    plt.plot(valid_x, valid_y, label='Valid')

    plt.legend(loc='upper right')
    plt.ylabel('loss value')
    plt.xlabel('Iteration')
    plt.show()