"""
@Description :   模型训练
@Author      :   python_assignment_group 
@Time        :   2022/10/30 07:23:51
"""

import os
import random
import shutil
import time
import warnings

import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.tensorboard.writer import SummaryWriter
from transformers import AdamW, get_linear_schedule_with_warmup

from configs import *
from nets import *
from tools.data_process import *
from tools.utils import *

# Silence noisy library warnings during training.
warnings.filterwarnings("ignore")

# Candidate networks; replace an entry with None to skip training it.
nets = [Word2VecCNNNet, FastTextNet, BertNet]
# Dataset constructors, index-aligned with `nets`.
get_datasets = [Word2VecDataset, FastTextDataset, BertDataset]

# Split the raw data into train/validation folds (re-splitting if configured).
_split_cfg = train_configs[0]
data_split = DataSplit(
    _split_cfg["raw_data_path"],
    test_percent=_split_cfg["test_percent"],
    data_split_num=_split_cfg["data_split_num"],
    resplit_data=_split_cfg["resplit_data"],
)
data_split()

# Training statistics of every network, appended once per trained net.
all_train_stat = []

# 训练所有网络
# Train every configured network in turn.
for net_i in range(len(nets)):

    # A None entry marks a network that should be skipped
    # (identity check is the idiomatic None comparison).
    if nets[net_i] is None:
        continue

    # Hyper-parameters for this network.
    config = train_configs[net_i]
    # Training statistics of this network, keyed by data split.
    net_train_stat = {}

    # 每个网络要训练data_split_num次
    # Each network is trained once per data split.
    for i in range(1, 1+config["data_split_num"]):

        # Load and preprocess the i-th train/validation split.
        train_data, valid_data = data_split.load_data(data_num=i)
        data_split.data_preprocess(data_num=i, mode=0, data=train_data)
        data_split.data_preprocess(data_num=i, mode=1, data=valid_data)

        # Wrap the raw data in the dataset class matching this network.
        make_dataset = get_datasets[net_i]
        train_dataset = make_dataset(train_data)
        valid_dataset = make_dataset(valid_data)

        # Batching: shuffled batches for training, sequential for validation.
        train_dataloader = DataLoader(
            train_dataset,
            sampler=RandomSampler(train_dataset),
            batch_size=config["batch_size"],
        )
        valid_dataloader = DataLoader(
            valid_dataset,
            sampler=SequentialSampler(valid_dataset),
            batch_size=config["batch_size"],
        )

        # Instantiate the network and move it to the selected device.
        net = nets[net_i]()
        net.to(device)

        # Cross-entropy loss for classification.
        criterion = nn.CrossEntropyLoss()

        # --- Optimizer --------------------------------------------------
        # Parameters whose names contain these substrings (biases and
        # LayerNorm weights) are conventionally exempt from weight decay.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for n, p in net.named_parameters()
                        if not any(nd in n for nd in no_decay)],
             'weight_decay': config["weight_dacay_rate"]},
            # BUGFIX: this group previously reused the configured decay rate,
            # which defeated the purpose of the no-decay split.
            {'params': [p for n, p in net.named_parameters()
                        if any(nd in n for nd in no_decay)],
             'weight_decay': 0.0},
        ]

        # AdamW with the configured learning rate.
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=config["lr"],
                          eps=1e-8)

        # Linear learning-rate decay over all training steps
        # (num_warmup_steps=0 means no warm-up phase).
        total_steps = len(train_dataloader) * config["epoch_num"]
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=0,
            num_training_steps=total_steps,
        )

        # TensorBoard logging; wipe any stale run for this data split first.
        log_dir = os.path.join(
            config["figs_path"], "Tensorboard", "data"+str(i))
        if os.path.exists(log_dir):
            shutil.rmtree(log_dir)
        writer = SummaryWriter(log_dir=log_dir)

        # --- Training bookkeeping ---------------------------------------
        print("正在使用"+str(device))

        # Best validation accuracy seen so far for this split.
        max_acc = 0
        # Curves collected for plotting; the (Chinese) keys are consumed by
        # the plotting helpers and must stay unchanged.
        train_stat = {
            "训练损失函数值": [],
            "验证损失函数值": [],
            "平均验证损失函数值": [],
            "训练准确率": [],
            "平均训练准确率": [],
            "平均训练损失函数值": [],
            "验证准确率": [],
            "平均训练用时": [],
            "平均验证用时": []
        }

        # Start time for the total-duration report.
        total_t0 = time.time()

        # Open a fresh, timestamped log file for this run.
        if not os.path.exists(config["log_path"]):
            os.makedirs(config["log_path"])
        log_name = os.path.join(
            config["log_path"], "log"+str(time.time()).replace(".", "")+".txt")
        file = open(log_name, "w")

        # Record the configuration at the top of the log.
        write_configs(file, config, i)
        file.write("batch size:{}\n".format(config["batch_size"]))
        file.write("learning rate:{}\n".format(config["lr"]))

        # Optionally resume from a cached model/optimizer state; min_epoch is
        # the epoch to restart from (0 when starting fresh).
        min_epoch = 0
        if config["use_cache"]:
            min_epoch = load_net_stats(
                config["cache_path"], config["buffer_name"], net, optimizer)

        for epoch_i in range(min_epoch, config["epoch_num"]):

            # Announce the epoch both on stdout and in the log file.
            epoch_banner = '======== Epoch {:} / {:} ========'.format(
                epoch_i + 1, config["epoch_num"])
            progress_msg = "Net:"+nets_names[net_i]+" Data:"+str(i)+'正在训练中...'
            print("")
            print(epoch_banner)
            print(progress_msg)
            file.write("\n")
            file.write(epoch_banner+"\n")
            file.write(progress_msg+"\n")

            # Per-epoch accumulators.
            t0 = time.time()          # epoch start timestamp
            total_train_loss = 0      # summed batch losses (for the average)
            total_train_acc = 0       # accuracy window, reset every 40 steps
            total_train_acc1 = 0      # accuracy summed over the whole epoch

            # Put the network in training mode.
            net.train()

            for step, batch in enumerate(train_dataloader):

                # BERT batches (net_i == 2) are [input_ids, attention_mask,
                # labels]; the other nets use [features, labels]. This was
                # previously two byte-identical branches differing only in
                # the label index — deduplicated here.
                label_idx = 2 if net_i == 2 else 1

                net.zero_grad()

                outputs = net.forward(batch)
                b_labels = batch[label_idx].to(device)
                loss = criterion(outputs, b_labels)
                total_train_loss += loss.item()
                train_stat["训练损失函数值"].append(loss.item())
                writer.add_scalars(
                    "Loss", {"TrainLoss_epoch"+str(epoch_i)+"_batch": loss.item()}, step)

                # Batch accuracy: computed once and reused (was computed
                # three times per batch before).
                logits_ = outputs.detach().cpu().numpy()
                b_labels_ = b_labels.to('cpu').numpy()
                batch_acc = flat_accuracy(logits_, b_labels_)
                total_train_acc += batch_acc
                total_train_acc1 += batch_acc
                writer.add_scalars(
                    "Acc", {"TrainAcc_epoch"+str(epoch_i)+"_batch": batch_acc}, step)

                loss.backward()

                optimizer.step()
                scheduler.step()

                # Progress report every 40 batches.
                if step % 40 == 0 and not step == 0:
                    elapsed = format_time(time.time() - t0)
                    train_stat["训练准确率"].append(total_train_acc/40)
                    progress = '  Batch {:>5,}  of  {:>5,}.    用时: {:}.    训练准确率: {:.4f}.'.format(
                        step, len(train_dataloader), elapsed, total_train_acc/40)
                    print(progress)
                    file.write(progress+"\n")
                    total_train_acc = 0  # restart the 40-batch window

            # Epoch-level averages over all training batches.
            avg_train_loss = total_train_loss / len(train_dataloader)
            avg_train_acc = total_train_acc1 / len(train_dataloader)
            train_stat["平均训练损失函数值"].append(avg_train_loss)
            train_stat["平均训练准确率"].append(avg_train_acc)
            writer.add_scalars(
                "Loss", {"TrainLoss_epoch": avg_train_loss}, epoch_i)
            writer.add_scalars(
                "Acc", {"TrainAcc_epoch": avg_train_acc}, epoch_i)
            # Wall-clock time spent on this training epoch.
            training_time = format_time(time.time() - t0)
            train_stat["平均训练用时"].append(training_time)

            print("")
            print("  平均训练损失函数值: {0:.4f}".format(avg_train_loss))
            print("  平均训练准确率: {0:.4f}".format(avg_train_acc))
            print("  训练这一个epoch用时: {:}".format(training_time))
            file.write("\n")
            file.write("  平均训练损失函数值: {0:.4f}\n".format(avg_train_loss))
            # BUGFIX: this line was a print(), so the average training
            # accuracy never reached the log file.
            file.write("  平均训练准确率: {0:.4f}\n".format(avg_train_acc))
            file.write("  训练这一个epoch用时: {:}\n".format(training_time))

            # Checkpoint model/optimizer state after each epoch when enabled.
            if config["gen_cache"]:
                print("正在保存模型状态...")
                cache_path = os.path.join(config["cache_path"], "data"+str(i))
                save_net_stats(cache_path, net,
                               optimizer, train_stat, 1+epoch_i)

            # --- Validation after each training epoch -----------------------
            print("")
            print("正在进行验证...")
            file.write("\n")
            file.write("正在进行验证...\n")

            t0 = time.time()

            # Disable dropout / batch-norm updates for evaluation.
            net.eval()

            # Running totals over validation batches
            # (removed unused `nb_eval_steps` counter).
            total_eval_accuracy = 0
            total_eval_loss = 0

            print("Net:"+nets_names[net_i]+" Data:"+str(i)+'正在验证中...')
            file.write("Net:"+nets_names[net_i]+" Data:"+str(i)+'正在验证中...\n')

            for step, batch in enumerate(valid_dataloader):

                # Labels sit at index 2 for BERT batches (net_i == 2) and at
                # index 1 otherwise; the former duplicated branches (which
                # also fetched b_labels twice) are collapsed into one path.
                label_idx = 2 if net_i == 2 else 1

                # No gradients needed during validation.
                with torch.no_grad():
                    outputs = net.forward(batch)
                    b_labels = batch[label_idx].to(device)
                    loss = criterion(outputs, b_labels)
                    total_eval_loss += loss.item()
                    train_stat["验证损失函数值"].append(loss.item())
                    writer.add_scalars(
                        "Loss", {"ValidLoss_epoch"+str(epoch_i)+"_batch": loss.item()}, step)
                    logits = outputs.detach().cpu().numpy()
                    label_ids = b_labels.to('cpu').numpy()

                # Accumulate validation accuracy for this batch
                # (computed once instead of twice).
                batch_acc = flat_accuracy(logits, label_ids)
                total_eval_accuracy += batch_acc
                writer.add_scalars(
                    "Acc", {"ValidAcc_epoch"+str(epoch_i)+"_batch": batch_acc}, step)

            # Epoch-level validation accuracy (mean over batches).
            avg_val_accuracy = total_eval_accuracy / len(valid_dataloader)
            train_stat["验证准确率"].append(avg_val_accuracy)
            writer.add_scalars(
                "Acc", {"ValidAcc_epoch": avg_val_accuracy}, epoch_i)
            print("  准确率: {0:.4f}".format(avg_val_accuracy))
            file.write("  准确率: {0:.4f}\n".format(avg_val_accuracy))

            # Log parameter values and gradients to TensorBoard (the
            # gradients are whatever is left from the last training step).
            for name, param in net.named_parameters():
                writer.add_histogram(
                    "epoch_"+str(epoch_i)+name+"_grad", param.grad, epoch_i)  # gradients
                writer.add_histogram(
                    "epoch_"+str(epoch_i)+name+"_data", param, epoch_i)  # values

        print("")
        print("训练完成!")
        print("训练最优准确率{0:.4f}.".format(max_acc))
        print("训练总用时 {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
        file.write("\n")
        file.write("训练完成!\n")
        file.write("训练最优准确率{0:.4f}.\n".format(max_acc))
        file.write("训练总用时 {:} (h:mm:ss)\n".format(
            format_time(time.time()-total_t0)))
        file.close()

        # 把模型状态保存至网络训练结果
        net_train_stat["data"+str(i)] = train_stat

        # 画图并存储画图信息
        fig_path = os.path.join(config["figs_path"], "data"+str(i))
        plot_for_each_dataset(fig_path, train_stat)
        if not os.path.exists(os.path.join("figs", "info_for_figs")):
            os.makedirs(os.path.join("figs", "info_for_figs"))
        np.save(os.path.join("figs", "info_for_figs", nets_names[net_i]+"data"+str(i)+".npy"),
                np.array(train_stat))

    # Aggregate this network's results and plot cross-split comparisons.
    all_train_stat.append(net_train_stat)
    net_fig_path = os.path.join(config["figs_path"], "all_data")
    plot_for_each_net(net_fig_path, net_train_stat)
    np.save(os.path.join("figs", "info_for_figs", nets_names[net_i]+"all_data.npy"),
            np.array(net_train_stat))

# Persist the aggregated statistics of every trained network.
all_stat_path = os.path.join("figs", "info_for_figs", "all_data.npy")
np.save(all_stat_path, np.array(all_train_stat))
