import json
import os
import time

from data_handle.dataset_dataloader import get_dataloader
from torchinfo import summary
import torch
from parameter_config import ParameterConfig
from transformers import BertTokenizerFast, GPT2LMHeadModel, GPT2Config
import transformers
from tqdm import tqdm
from utils.functions_tools import calculate_accuracy, calculate_loss


def train_epoch(model, optimizer, params, train_dataloader, epoch, lr_scheduler):
    """Run one training epoch.

    Uses gradient accumulation (every ``params.gradient_accumulation_steps``
    batches), logs progress every ``params.loss_step`` batches, and
    checkpoints the model every 10 epochs and on the final epoch.

    Args:
        model: GPT2LMHeadModel (or compatible) being trained.
        optimizer: optimizer over ``model.parameters()``.
        params: config object (device, ignore_index, epochs, lr/step settings,
            save_model_path, ...).
        train_dataloader: yields ``(input_ids, labels)`` tensor batches.
        epoch: 1-based index of the current epoch (for logging/checkpoints).
        lr_scheduler: stepped once per optimizer update.

    Returns:
        (epoch_mean_loss, epoch_mean_accuracy): floats averaged over the epoch.
    """
    model.train()

    start_time = time.time()

    total_loss = 0  # sum of per-batch losses, before accumulation scaling
    epoch_correct_num = 0  # correctly predicted tokens in this epoch
    epoch_total_word = 0  # total predicted tokens in this epoch

    for batch_idx, (input_ids, labels) in enumerate(tqdm(train_dataloader), start=1):
        input_ids = input_ids.to(params.device)
        labels = labels.to(params.device)

        outputs = model(input_ids=input_ids, labels=labels)
        logits = outputs.logits  # e.g. torch.Size([4, 282, 13317])
        # Use the project's label-smoothed loss instead of the model's
        # built-in cross-entropy (outputs.loss).
        loss = calculate_loss(logits, labels, params.ignore_index, smoothing=True)
        loss = loss.mean()
        total_loss += loss.item()

        # Token-level accuracy for this batch.
        batch_n_correct, batch_n_word = calculate_accuracy(logits, labels, ignore_index=params.ignore_index)
        batch_accuracy = batch_n_correct / batch_n_word
        epoch_correct_num += batch_n_correct
        epoch_total_word += batch_n_word

        # Gradient accumulation: scale the loss so the summed gradient over
        # the window matches a single large-batch update.
        if params.gradient_accumulation_steps > 1:
            loss = loss / params.gradient_accumulation_steps

        loss.backward()

        # Update parameters once per accumulation window. Clip the *fully*
        # accumulated gradient right before the step — the original clipped
        # partially accumulated gradients on every micro-batch, which
        # distorts accumulation.
        if batch_idx % params.gradient_accumulation_steps == 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), params.max_grad_norm)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

        if batch_idx % params.loss_step == 0:
            # Undo the accumulation scaling so the logged value is comparable.
            true_loss = loss * params.gradient_accumulation_steps
            lr = lr_scheduler.get_last_lr()[0]
            print('Epoch: {}/{} | Loss: {:.5f} | Train Accuracy: {:.5f} | Lr: {:.5f}'.format(
                epoch, params.epochs, true_loss, batch_accuracy, lr))

    # Flush leftover gradients when the batch count is not a multiple of the
    # accumulation window; otherwise they would leak into the next epoch.
    if len(train_dataloader) % params.gradient_accumulation_steps != 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), params.max_grad_norm)
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    # Epoch-level averages.
    epoch_mean_loss = total_loss / len(train_dataloader)
    epoch_mean_accuracy = epoch_correct_num / epoch_total_word
    spend_time = time.time() - start_time
    print('Epoch: {}/{} | Train Mean Loss: {:.5f} | Train Mean Accuracy: {:.5f} | Spend Time {:.5f}'.format(
        epoch, params.epochs, epoch_mean_loss, epoch_mean_accuracy, spend_time))

    # Periodic checkpoint (every 10 epochs and on the last epoch).
    if epoch % 10 == 0 or epoch == params.epochs:
        model_path = os.path.join(params.save_model_path, 'epoch_{}'.format(epoch))
        # makedirs(exist_ok=True): safe for nested paths and reruns,
        # unlike the original os.mkdir guarded by exists().
        os.makedirs(model_path, exist_ok=True)
        model.save_pretrained(model_path)

    return epoch_mean_loss, epoch_mean_accuracy


def valid_epoch(model, params, valid_dataloader, epoch):
    """Evaluate the model on the validation set for one epoch.

    Args:
        model: the language model under evaluation.
        params: config object providing device, ignore_index and epochs.
        valid_dataloader: yields ``(input_ids, labels)`` tensor batches.
        epoch: 1-based epoch index, used only for logging.

    Returns:
        (val_mean_loss, val_mean_accuracy): averages over the validation set.
    """
    running_loss = 0
    batch_losses = []
    correct_tokens = 0  # correctly predicted tokens across the whole set
    total_tokens = 0  # total predicted tokens across the whole set
    tic = time.time()

    model.eval()

    with torch.no_grad():
        for step, (input_ids, labels) in enumerate(valid_dataloader, start=1):
            input_ids, labels = input_ids.to(params.device), labels.to(params.device)

            outputs = model(input_ids=input_ids, labels=labels)
            logits = outputs.logits
            loss = outputs.loss

            batch_losses.append(loss.item())
            running_loss += loss.mean().item()

            n_correct, n_word = calculate_accuracy(logits, labels, ignore_index=params.ignore_index)
            correct_tokens += n_correct
            total_tokens += n_word

    # Average loss and token accuracy over the validation set.
    val_mean_loss = running_loss / len(valid_dataloader)
    val_mean_accuracy = correct_tokens / total_tokens

    elapsed = time.time() - tic
    print('Epoch: {}/{} | Val Mean Loss: {:.5f} | Val Mean Accuracy: {:.5f} | Spend Time {:.5f}'.format(
        epoch, params.epochs, val_mean_loss, val_mean_accuracy, elapsed))
    return val_mean_loss, val_mean_accuracy


def train(model, params):
    """Full training loop.

    For each epoch: train, validate, save the model whenever validation loss
    improves, and persist the loss/accuracy curves to
    ``params.result_path/loss.json``.

    Args:
        model: GPT2LMHeadModel (or compatible), already on ``params.device``.
        params: config object (paths, batch_size, lr, eps, warmup steps, ...).
    """
    # Per-epoch history, dumped to JSON after every epoch.
    loss_train = []
    loss_valid = []
    accuracy_train = []
    accuracy_valid = []

    train_dataloader, valid_dataloader = get_dataloader(params.train_path, params.valid_path, params.batch_size)

    # torch.optim.AdamW replaces transformers.AdamW, which is deprecated and
    # removed in recent transformers releases. weight_decay=0.0 matches the
    # old transformers.AdamW default (torch's default is 0.01).
    optimizer = torch.optim.AdamW(model.parameters(), lr=params.lr, eps=params.eps, weight_decay=0.0)

    # Linear warmup then linear decay over the total number of optimizer steps.
    t_total = (len(train_dataloader) // params.gradient_accumulation_steps) * params.epochs
    lr_scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=params.num_warmup_steps, num_training_steps=t_total)

    best_valid_loss = float('inf')
    for epoch in range(1, params.epochs + 1):
        # Train for one epoch.
        train_loss, train_accuracy = train_epoch(model, optimizer, params, train_dataloader, epoch, lr_scheduler)
        loss_train.append(train_loss)
        accuracy_train.append(train_accuracy)

        # Validate (original stored this into `train_accuracy`, shadowing the
        # training metric — renamed for clarity).
        valid_loss, valid_accuracy = valid_epoch(model, params, valid_dataloader, epoch)
        loss_valid.append(valid_loss)
        accuracy_valid.append(valid_accuracy)

        # Save the model with the lowest validation loss seen so far, e.g.
        # ./save_model/min_loss_model_20/
        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss
            model_path = os.path.join(params.save_model_path, 'min_loss_model_{}'.format(epoch))
            os.makedirs(model_path, exist_ok=True)
            # BUG FIX: the original created the directory but never wrote the
            # weights, so the "best" checkpoint was always empty.
            model.save_pretrained(model_path)

        # Persist metric curves after every epoch so a crash loses nothing.
        os.makedirs(params.result_path, exist_ok=True)
        result = {'loss_train': loss_train, 'loss_valid': loss_valid, 'accuracy_train': accuracy_train,
                  'accuracy_valid': accuracy_valid}
        with open(os.path.join(params.result_path, 'loss.json'), 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=2)


def main():
    """Entry point: build tokenizer and GPT-2 model, then run training."""
    # Load hyper-parameters / paths.
    params = ParameterConfig()

    # Tokenizer built from the project vocabulary file.
    tokenizer = BertTokenizerFast(params.vocab_path,
                                  sep_token="[SEP]",
                                  pad_token="[PAD]",
                                  cls_token="[CLS]")

    # Either load pretrained weights or build a fresh model from a JSON config.
    if params.pretrained_model:
        model = GPT2LMHeadModel.from_pretrained(params.pretrained_model)
    else:
        model_config = GPT2Config.from_json_file(params.config_json)
        model = GPT2LMHeadModel(config=model_config)
    model.to(params.device)

    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if tokenizer.vocab_size != model.config.vocab_size:
        raise ValueError(
            'tokenizer vocab_size ({}) does not match model vocab_size ({})'.format(
                tokenizer.vocab_size, model.config.vocab_size))

    # Report parameter counts.
    total_params = sum(p.numel() for p in model.parameters())
    trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"Total params: {total_params}\nTrainable params: {trainable_params}")

    # Create the checkpoint directory (race-free, supports nested paths).
    os.makedirs(params.save_model_path, exist_ok=True)

    # Train + evaluate.
    train(model, params)


# Run training only when executed as a script, not on import.
if __name__ == '__main__':
    main()
