#
# @file yrbasin_g09_train.py
#
# @author shenyc
# @date 2023-01-12
# @description 训练数据集
#
# https://blog.csdn.net/andyL_05/article/details/103363603?spm=1001.2101.3001.6661.1&utm_medium=distribute.pc_relevant_t0.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-103363603-blog-120861316.pc_relevant_aa&depth_1-utm_source=distribute.pc_relevant_t0.none-task-blog-2%7Edefault%7ECTRLIST%7ERate-1-103363603-blog-120861316.pc_relevant_aa&utm_relevant_index=1
#
###############################################################################
#
import os
import time
import torch
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch import Tensor
from torch.utils.data import DataLoader

import yrbasin
# from yrbasin import CNNFeature12 as NET
# from yrbasin import CNNFeature13 as NET
from yrbasin import CNNFeature21 as NET
# --------------------------------------------------------------
from yrbasin_g09_common import TRANSFORM_COMPOSE
from yrbasin_g09_common import (
    ROOT, TRANS_METHOD, PHYSCOUNT, OUTPUT_PATH,
    L1_FOLDER_RAIN, L1_FOLDER_STATION, L1_FOLDER_TRAIN, L1_FOLDER_PREDICT
)
# --------------------------------------------------------------
# --------------------------------------------------------------
# Configuration: dataset location and training hyper-parameters.
# --------------------------------------------------------------
YRBASIN_KINDS = yrbasin.KINDS
# --------------------------------------------------------------
L2_FOLDER_TRANS = f"T{TRANS_METHOD}"              # version tag (transform-method id)
L3_FOLDER_FEATURE = f"Feature{PHYSCOUNT}"         # feature dataset folder name
# --------------------------------------------------------------
TRANSFORM = TRANSFORM_COMPOSE
# YRBASIN_ROOT_G09 = f"{ROOT}\\{L1_FOLDER_TRAIN}\\{L2_FOLDER_TRANS}\\{L3_FOLDER_FEATURE}\\km100"
# Windows-style path to the km200 training dataset root.
YRBASIN_ROOT_G09 = f"{ROOT}\\{L1_FOLDER_TRAIN}\\{L2_FOLDER_TRANS}\\{L3_FOLDER_FEATURE}\\km200"

# Batch Normalization
# Layer Normalization / Instance Normalization / Group Normalization.

#
# Hyper-parameters
#
# lr_list = [0.01, 0.03, 0.05, 0.08]
LR = 0.01           # learning rate (0.01)
EPOCHS = 99         # number of training epochs
BATCH_SIZE = 64     # mini-batch size

#
SHUFFLE = True      # shuffle samples each epoch
YRBASIN_ROOT = YRBASIN_ROOT_G09
# DEVICE = torch.device("cpu")
# Use the first CUDA device when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

TRAIN = 0  # data_flag value selecting the training split (see yrbasin.get_yrbasin)
TEST = 1   # data_flag value selecting the test split

# Training dataset
train_data = yrbasin.get_yrbasin(
    YRBASIN_ROOT, data_flag=TRAIN, transform=TRANSFORM)
train_loader = DataLoader(train_data, batch_size=BATCH_SIZE, shuffle=SHUFFLE)
# NOTE(review): train_data exposes a `length` attribute while the test set
# uses len() below — presumably equivalent; confirm in yrbasin.
train_data_size = train_data.length

# Test dataset (used here as a validation set)
test_data = yrbasin.get_yrbasin(
    YRBASIN_ROOT, data_flag=TEST, transform=TRANSFORM)
test_loader = DataLoader(test_data, batch_size=BATCH_SIZE, shuffle=SHUFFLE)
test_data_size = len(test_data)

print(f"训练数据集的长度为 {train_data_size}")
print("测试数据集的长度为 {}".format(test_data_size))
print(train_data.features.shape)


class Net_Demo(torch.nn.Module):
    """Small demo CNN mapping [nLayers, 9, 9] inputs to nClasses logits.

    Three convolutional stages followed by three fully-connected layers.
    """

    def __init__(self, nLayers: int = 21, nClasses: int = 5):
        """
        Args:
            nLayers: number of input channels (stacked feature layers).
            nClasses: number of output classes.
        """
        super().__init__()
        stages = [
            # C1: 1x1 conv keeps [nLayers, 9, 9]; 2x2 pool (stride 1) -> [nLayers, 8, 8]
            torch.nn.Conv2d(nLayers, nLayers, 1),
            torch.nn.MaxPool2d(2, stride=1),
            torch.nn.ReLU(),
            # C2: 3x3 conv, padded -> [32, 8, 8]
            torch.nn.Conv2d(nLayers, 32, 3, padding=1),
            torch.nn.ReLU(),
            # C3: 3x3 conv -> [64, 8, 8]; 2x2 pool -> [64, 4, 4]
            torch.nn.Conv2d(32, 64, 3, padding=1),
            torch.nn.MaxPool2d(2),
            torch.nn.ReLU(),
            # Flatten: 64 * 4 * 4 = 1024 features
            torch.nn.Flatten(),
            # FC1 -> FC2 -> FC3 (logits)
            torch.nn.Linear(1024, 256),
            torch.nn.ReLU(),
            torch.nn.Linear(256, 64),
            torch.nn.ReLU(),
            torch.nn.Linear(64, nClasses),
        ]
        self.model = torch.nn.Sequential(*stages)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """Run the forward pass and return raw (unnormalized) class logits."""
        return self.model(input)


def MY_TEST():
    """
    Smoke-test: push one batch from the global `train_loader` through a
    fresh Net_Demo, count correct predictions and compute the loss.

    Only used for manual debugging (the module-level call is commented out).

    Returns:
        (n_correct, loss): number of correct predictions in the batch and
        the CrossEntropyLoss tensor.
    """
    net = Net_Demo().to(DEVICE)
    loss_fn = torch.nn.CrossEntropyLoss().to(DEVICE)

    features, targets = next(iter(train_loader))
    # DataLoader already yields tensors. Re-wrapping with torch.Tensor(...)
    # would copy AND cast to float32 — float targets break CrossEntropyLoss,
    # which expects integer class indices here.
    features = features.to(DEVICE)
    targets = targets.to(DEVICE)

    output: torch.Tensor = net(features)
    _, pred = torch.max(output.data, dim=1)
    n_correct = (pred == targets).sum().item()  # number of correct predictions
    print(output.data.shape)    # [batch_size, 5]
    loss = loss_fn(output, targets)
    return n_correct, loss


# MY_TEST()


# Neural network — NET is the CNNFeature21 class imported from yrbasin.
model = NET()
model = model.to(DEVICE)

# Loss function — classification loss over integer class targets.
criterion = torch.nn.CrossEntropyLoss()
criterion = criterion.to(DEVICE)

# Optimizer — stochastic gradient descent (SGD) with light momentum.
# optimizer = torch.optim.SGD(model.parameters(), lr=LR)
optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.1)


def train(epoch: int, start_time: float):
    """
    Run one training epoch over the global `train_loader`.

    Args:
        epoch: zero-based epoch index (used only for log output).
        start_time: wall-clock time (time.time()) when training started,
            used to report elapsed time.

    Returns:
        The loss accumulated since the last log line, scaled by
        100 / astep (formula kept from the original for compatibility
        with the values recorded by train_main).
    """
    model.train()
    astep = 100         # log every `astep` mini-batches; hoisted out of the
                        # loop so the final return cannot hit a NameError
                        # when the loader is empty
    running_loss = 0.0  # re-accumulated after each log line
    for step, data in enumerate(train_loader):
        features, targets = data
        # DataLoader already yields tensors; torch.Tensor(...) re-wrapping
        # would copy, cast to float32 and detach from autograd.
        features = features.to(DEVICE)
        targets = targets.to(DEVICE)

        # Forward pass
        output = model(features)
        loss = criterion(output, targets)

        # Backward pass — call backward() on the loss itself; wrapping it in
        # torch.Tensor(...) (as the original did) yields a tensor detached
        # from the computation graph.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        # Print a progress line every `astep` mini-batches.
        if step % astep == astep - 1:
            t = time.time() - start_time
            print('[%2d,%5d] Loss: %.3f Elapsed: %f' %   #
                  (epoch + 1, step + 1, running_loss / astep, t))
            running_loss = 0.0
    return (100 * running_loss / astep)


def eval():
    """
    Evaluate classification accuracy on the global `test_loader`.

    NOTE: shadows the builtin eval(); name kept because train_main calls it.

    Returns:
        Accuracy percentage in [0, 100] (0.0 when the loader is empty).
    """
    model.eval()
    total = 0
    correct = 0
    with torch.no_grad():
        for features, targets in test_loader:
            features = features.to(DEVICE)
            targets = targets.to(DEVICE)

            output: torch.Tensor = model(features)
            _, pred = torch.max(output.data, dim=1)  # predicted class index
            # (pred == targets) is already a tensor; no torch.Tensor() wrap.
            correct += (pred == targets).sum().item()
            total += targets.size(0)  # total samples evaluated
        # end_for
        if total == 0:
            # Empty loader: report 0 instead of raising ZeroDivisionError.
            print('正确率: %2d %%' % 0)
            return 0.0
        print('正确率: %2d %%' % (100 * correct / total))
    # end_with
    return (100 * correct / total)


def save_model(epoch: int):
    """
    Save the global `model` for the given epoch under <OUTPUT_PATH>/models.

    Args:
        epoch: zero-based epoch index; the file name uses epoch + 1.
    """
    mpath = os.path.join(OUTPUT_PATH, "models")
    os.makedirs(mpath, exist_ok=True)  # create the output dir if needed
    # (original recomputed mpath here with an identical duplicate line)
    mfile = "model_yrbasin_epoch_{0:0>2d}.pth".format(epoch+1)
    # Saves the whole module object (not just the state_dict).
    torch.save(model, os.path.join(mpath, mfile))
    return


def save_train_info(epochx: list, lossy: list, accuracy: list):
    """
    Write per-epoch training stats to <OUTPUT_PATH>/train_info.txt as CSV.

    Args:
        epochx:     list of epoch indices
        lossy:      list of training losses per epoch
        accuracy:   list of accuracies per epoch
    """
    fname = os.path.join(OUTPUT_PATH, "train_info.txt")
    with open(fname, 'w', encoding='utf-8') as f:
        # Fixed header typo: "accurac" -> "accuracy".
        f.write("epoch,loss,accuracy\n")
        for ep, ls, ac in zip(epochx, lossy, accuracy):
            f.write("{0},{1:.3f},{2:.3f}\n".format(ep, ls, ac))
        # endfor
    # endwith
    return


def save_train_figure(epochs: int, xepoch: list, yloss: list, yaccurac: list):
    """
    Plot the loss and accuracy curves against epoch and save the figure
    to <OUTPUT_PATH>/train_figure_<epochs>.png.
    """
    plt.figure()  # fresh figure so repeated calls don't overlay old curves
    # The original plt.plot(x, yloss, yaccurac) relied on matplotlib's
    # implicit argument grouping (accuracy was plotted against its own
    # indices, unlabeled). Plot each series against the epoch axis
    # explicitly and label them.
    plt.plot(xepoch, yloss, label="loss")
    plt.plot(xepoch, yaccurac, label="accuracy")
    plt.legend()
    plt.minorticks_on()  # add minor tick marks
    plt.grid(True, which="both", axis="both")
    fig = f"{OUTPUT_PATH}/train_figure_{epochs:02d}.png"
    plt.savefig(fig)
    plt.close()  # release the figure's memory
    # plt.show()


def train_main(epochs: int = EPOCHS):
    """
    Full training loop: train each epoch, evaluate, checkpoint, then
    persist the collected statistics and the training curve figure.

    Args:
        epochs: number of epochs to run.
    """
    history_epoch = []  # epoch indices (x axis)
    history_loss = []   # training loss per epoch (y axis)
    history_acc = []    # eval accuracy per epoch (y axis)

    t0 = time.time()
    epoch = 0
    while epoch < epochs:
        history_epoch.append(epoch)
        history_loss.append(train(epoch, t0))  # one epoch of training
        history_acc.append(eval())             # accuracy on the test set
        save_model(epoch)                      # checkpoint this epoch
        epoch += 1

    save_train_info(history_epoch, history_loss, history_acc)
    save_train_figure(epochs, history_epoch, history_loss, history_acc)
    return


"""
__main__
"""
if __name__ == '__main__':
    # x = 10
    train_main()
    # train_main(10)
    pass


"""
【K折交叉验证技术】

训练和测试模型的传统技术是将数据拆分为两种不同的拆分， 称为训练和测试拆分， 两者比例一般为 70% : 30%。
但是这种方式会产生一些问题：为了训练最佳性能的模型，我们需要手动对超参数进行适当的调整，以利用测试
数据实现良好的模型性能，这种静态的方式会导致在测试集上出现过度拟合的风险。因为有关测试集的知识或者
信息会"泄漏"到模型中，评估指标就不是真实的反映模型泛化性能。

为了解决上述问题，进一步做训练、验证和测试三个拆分：模型超参数使用训练和验证集进行优化，最后使用测试数据确定模型泛化性能。
但是这种技术也有缺点，通过将数据划分为三组，会减少用于学习模型的样本数量，而且结果取决于对（训练、验证）集的特定随机选择。
因此为了克服上述种种问题， 交叉验证技术就应运而生。我们对数据进行两种不同的拆分， 即训练和测试拆分， 然后通过再对训练数据
的K折划分, 其中 (K-1) 折用于训练, 剩下一折用于验证, 这个过程重复K次, 并通过获取训练的所有K个模型的平均值和标准差来计算一
组特定超参数的模型性能分数，计算给出最优模型的超参数。最后，模型使用最优超参数在整个训练数据集上再次训练，并通过计算测试
数据集上的评估分数来计算泛化性能。
"""

"""
【1x1卷积核的作用】

1 增加网络深度（增加非线性映射次数）
    在不增加感受野的情况下，让网络加深，为的就是引入更多的非线性。
    通常一个卷积过程包括一个激活函数，比如 Sigmoid 和 Relu。

2 升维/降维
    大于1x1的卷积核无疑会增加计算的参数量

3 跨通道的信息交互
4 减少卷积核参数（简化模型）
"""

"""
[1]LeNet-5中主要有2个卷积层、2个下抽样层（池化层）、3个全连接层3种连接方式
[2]AlexNet
[3]VGGNet
[4]Google Inception Net = GoogLeNet
[5]ResNet
[6]ResNeXt
[7]SENet
[8]基于区域的CNNs (R-CNN - 2013, Fast R-CNN - 2015, Faster R-CNN - 2015)


BatchNormalization()
"""
