import json

from datetime import datetime
import os
import time

import torch
from tqdm import tqdm
from transformers import BertTokenizerFast, GPT2Config, GPT2LMHeadModel
import transformers
from config import ParameterConfig
from data_handle.data_loader import get_chat_dataloader
from torch.utils.data import DataLoader
from funtions_tools  import calculate_acc


def _save_stats(path: str, stats: dict) -> None:
    # Persist a stats dict as JSON, overwriting any previous snapshot.
    with open(path, 'w') as f:
        json.dump(stats, f)


def train_epoch(model: GPT2LMHeadModel,
                train_dataloader,
                optimizer, scheduler,
                epoch, args: ParameterConfig):
    '''
    Train the GPT-2 model for one full pass over the training data.

    :param model: GPT-2 language model (HF GPT2LMHeadModel)
    :param train_dataloader: training data loader yielding (input_ids, labels)
    :param optimizer: optimizer used to update the parameters
    :param scheduler: learning-rate warmup/decay scheduler
    :param epoch: current epoch index (0-based)
    :param args: configuration object (device, accumulation steps, paths, ...)
    :return: mean (unscaled) training loss over the epoch, as a float
    '''
    model.train()
    device = args.device
    epoch_loss_list = []
    epoch_correct_list = []
    epoch_total_list = []

    # ======================================== #
    # 1 Core training loop
    # ======================================== #
    for step, (input_ids, labels) in enumerate(tqdm(train_dataloader, desc='🔄️Training')):
        input_ids = input_ids.to(device)
        labels = labels.to(device)

        # 1.1 Forward pass: GPT2LMHeadModel shifts the labels internally.
        outputs = model(input_ids, labels=labels)
        loss = outputs.loss
        logits = outputs.logits

        # Record the *unscaled* per-batch loss before any gradient-accumulation
        # scaling, so logged stats are comparable across configurations.
        # (Previously the scaled loss was recorded, under-reporting it.)
        batch_loss = loss.item()
        epoch_loss_list.append(batch_loss)

        # 1.2 Gradient accumulation: scale the loss so accumulated gradients
        # average out, simulating a larger effective batch size.
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps

        # 1.3 Backward pass + gradient clipping
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

        # 1.4 Update parameters once enough gradients have accumulated.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            tqdm.write(f"😊 epoch: {epoch + 1}, step: {step + 1}, loss: {batch_loss}")
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

        # 1.5 Accuracy bookkeeping (tokens matching labels / non-ignored tokens)
        batch_correct_num, batch_total_num = calculate_acc(logits, labels, ignore_index=args.ignore_index)
        epoch_correct_list.append(batch_correct_num)
        epoch_total_list.append(batch_total_num)
        # Guard against division by zero when every token in the batch is ignored.
        batch_acc = batch_correct_num / batch_total_num if batch_total_num else 0.0
        tqdm.write('epoch: {}, step: {}, loss: {:.4f}, acc: {:.4f}'.format(
            epoch + 1, step + 1, batch_loss, batch_acc))

        # 1.6 Periodically persist running stats (every 20 steps or last step).
        if (step + 1) % 20 == 0 or step == len(train_dataloader) - 1:
            _save_stats(
                os.path.join(args.log_path, "training_stats实时记录.json"),
                {
                    "loss": epoch_loss_list,
                    "correct": epoch_correct_list,
                    "total": epoch_total_list
                })

    # Flush leftover gradients when the number of batches is not an exact
    # multiple of gradient_accumulation_steps (previously they were dropped).
    if len(train_dataloader) % args.gradient_accumulation_steps != 0:
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

    # ======================================== #
    # 2 Save model (first epoch, every 10 epochs, or final epoch)
    # ======================================== #
    if epoch == 0 or (epoch + 1) % 10 == 0 or (epoch + 1) == args.epochs:
        print(f"✅ Saving model for epoch {epoch + 1}...")
        save_path = os.path.join(args.save_model_path, f"checkpoint_epoch{epoch + 1}")
        os.makedirs(save_path, exist_ok=True)
        model.save_pretrained(save_path)
        print(f"✅ Model saved to: {save_path}")

    # ======================================== #
    # 3 Save per-epoch training log (loss and accuracy for every step)
    # ======================================== #
    stats_save_path = os.path.join(args.log_path, f"training_stats_epoch_{epoch + 1}.json")
    _save_stats(stats_save_path, {
        "loss": epoch_loss_list,
        "correct": epoch_correct_list,
        "total": epoch_total_list,
        "step": list(range(1, len(epoch_loss_list) + 1))  # 1-based step index
    })
    print(f"✅ Training statistics saved to: {stats_save_path}")

    # Return the mean epoch loss so callers (see `train`) can track progress.
    return sum(epoch_loss_list) / len(epoch_loss_list) if epoch_loss_list else 0.0








def validate_epoch():
    # Placeholder: validation loop is not implemented yet, so no validation
    # loss is computed anywhere in this script.
    ...


def train(model: GPT2LMHeadModel,
          args: ParameterConfig,
          train_dataloader: DataLoader) -> None:
    '''
    Run the full training loop over args.epochs epochs.

    :param model: GPT-2 model to train (already moved to args.device)
    :param args: configuration object (lr, eps, epochs, warmup_steps, ...)
    :param train_dataloader: training data loader
    '''
    # t_total: total number of optimizer updates over the whole run
    # (steps per epoch divided by accumulation steps, times epochs) —
    # used by the scheduler to shape the learning-rate curve.
    t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.epochs

    # AdamW optimizer; eps is added to the denominator for numerical
    # stability (prevents division by zero in the update rule).
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, eps=args.eps)

    # Linear warmup followed by linear decay of the learning rate.
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=args.warmup_steps,
        num_training_steps=t_total)
    print('starting training!!')

    # Per-epoch mean training losses (validation is not implemented yet).
    train_losses = []
    for epoch in range(args.epochs):
        # ========== train ========== #
        # NOTE: a debug `break` previously terminated this loop after the
        # first epoch; removed so all args.epochs epochs actually run.
        train_loss = train_epoch(model, train_dataloader=train_dataloader,
                                 optimizer=optimizer, scheduler=scheduler,
                                 epoch=epoch, args=args)
        train_losses.append(train_loss)




def main():
    '''Entry point: build tokenizer, output dirs, model, and data loader, then train.'''
    params = ParameterConfig()

    # Tokenizer built from the project's vocabulary file.
    tokenizer = BertTokenizerFast(params.vocab_path)
    print("✅ Tokenizer loaded successfully.")

    # Create the model output directory.
    # os.makedirs (vs os.mkdir) also creates missing parent directories.
    if not os.path.exists(params.save_model_path):
        os.makedirs(params.save_model_path)
        print(f"✅ {params.save_model_path} created successfully.")
    # Create the log output directory.
    if not os.path.exists(params.log_path):
        os.makedirs(params.log_path)
        print(f"✅ {params.log_path} created successfully.")

    # Build the model.
    if params.pretrained_model:
        # Load a pretrained checkpoint.
        model = GPT2LMHeadModel.from_pretrained(params.pretrained_model)
        print("✅ Pretrained model loaded successfully.")
    else:
        # Initialize a fresh model from the JSON config.
        model_config = GPT2Config.from_json_file(params.config_json)
        model = GPT2LMHeadModel(config=model_config)
        print("✅ Model initialized from config.")

    model = model.to(params.device)
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if model.config.vocab_size != tokenizer.vocab_size:
        raise ValueError(
            f"Model vocab size ({model.config.vocab_size}) does not match "
            f"tokenizer vocab size ({tokenizer.vocab_size}).")

    # Load the chit-chat dataset.
    chat_dataloader = get_chat_dataloader(params.chat_path)
    print(f"✅ Chat dataset:{len(chat_dataloader)} loaded successfully.")

    # ========= Start Training =========
    train(model, params, chat_dataloader)


# Run training only when executed as a script (not on import).
if __name__ == '__main__':
    main()