import json
from datetime import datetime
import os
import time
import torch
from tqdm import tqdm
from transformers import BertTokenizerFast, GPT2Config, GPT2LMHeadModel
import transformers
from config import ParameterConfig
from data_handle.data_loader import get_chat_dataloader
from torch.utils.data import DataLoader
from funtions_tools import calculate_acc



def save_checkpoint(model, optimizer, scheduler, epoch, args):
    """Persist model/optimizer/scheduler state so training can resume later.

    The file is named ``checkpoint_epoch{epoch + 1}.pt`` (1-based for humans),
    while the stored ``epoch`` key keeps the zero-based value, matching what
    ``load_checkpoint`` expects when computing the resume epoch.
    """
    ckpt_path = os.path.join(args.save_model_path, f"checkpoint_epoch{epoch + 1}.pt")
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'scheduler_state_dict': scheduler.state_dict(),
    }
    torch.save(state, ckpt_path)
    print(f"✅ Checkpoint saved to {ckpt_path}")



def load_checkpoint(model, optimizer, scheduler, args):
    """Resume training from the newest ``checkpoint_epoch{N}.pt`` in ``args.save_model_path``.

    Returns ``(model, optimizer, scheduler, start_epoch)``; ``start_epoch`` is 0
    when no checkpoint exists, otherwise the stored zero-based epoch + 1.

    Fix vs. original: the directory is filtered strictly to files matching the
    ``checkpoint_epoch{N}.pt`` naming scheme. The old code grabbed every
    ``*.pt`` file and parsed the epoch with slicing, so any unrelated ``.pt``
    file in the directory (e.g. an exported model) crashed with a ValueError
    or got mis-selected as "latest".
    """
    prefix, suffix = "checkpoint_epoch", ".pt"

    def _epoch_num(fname):
        # Return N for "checkpoint_epoch{N}.pt", else None for non-checkpoint files.
        if fname.startswith(prefix) and fname.endswith(suffix):
            middle = fname[len(prefix):-len(suffix)]
            if middle.isdigit():
                return int(middle)
        return None

    numbered = [(n, f) for f in os.listdir(args.save_model_path)
                if (n := _epoch_num(f)) is not None]
    if not numbered:
        print("🆕 No checkpoint found, start training from scratch.")
        return model, optimizer, scheduler, 0

    # Highest epoch number wins; tuple comparison falls back to the name.
    _, latest_checkpoint = max(numbered)
    checkpoint_path = os.path.join(args.save_model_path, latest_checkpoint)

    # Restore all three states so the resumed run continues seamlessly.
    checkpoint = torch.load(checkpoint_path, map_location=args.device)
    model.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
    start_epoch = checkpoint['epoch'] + 1  # stored epoch is zero-based
    print(f"✅ Loaded epoch: {start_epoch}  from {checkpoint_path}")
    return model, optimizer, scheduler, start_epoch



def train_epoch(model: GPT2LMHeadModel, train_dataloader,
                optimizer, scheduler, epoch, args: ParameterConfig):
    """Train the model for one epoch with gradient accumulation.

    Logs per-step loss/accuracy, periodically dumps running statistics to
    ``args.log_path``, and writes a per-epoch stats JSON at the end.

    Fixes vs. original:
    - When ``len(train_dataloader)`` is not divisible by
      ``gradient_accumulation_steps``, the leftover accumulated gradients were
      never applied nor zeroed, silently leaking into the next epoch's first
      update. A final flush step now applies them.
    - The per-batch accuracy print guarded against division by zero when a
      batch contains no countable tokens.
    """
    model.train()
    device = args.device
    epoch_loss_list, epoch_correct_list, epoch_total_list = [], [], []
    pending_grads = False  # True while accumulated gradients await an optimizer step

    # ======================================== #
    # 1 Core training loop
    # ======================================== #
    for step, (input_ids, labels) in enumerate(tqdm(train_dataloader, desc='🔄️🔄️🔄️Training')):

        input_ids = input_ids.to(device)
        labels = labels.to(device)

        # 1.1 Forward pass; the model shifts labels internally for LM loss.
        outputs = model(input_ids, labels=labels)
        loss = outputs.loss
        logits = outputs.logits

        # 1.2 Gradient accumulation simulates a larger effective batch size,
        # so scale the loss down to keep the gradient magnitude comparable.
        if args.gradient_accumulation_steps > 1:
            loss = loss / args.gradient_accumulation_steps

        # 1.3 Backward pass + gradient clipping.
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
        pending_grads = True

        # 1.4 Apply the accumulated gradients every N micro-steps.
        if (step + 1) % args.gradient_accumulation_steps == 0:
            tqdm.write(f"😊 epoch: {epoch + 1}, step: {step + 1}, loss: {loss.item()}")
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()
            pending_grads = False

        # 1.5 Track training metrics.
        epoch_loss_list.append(loss.item())
        batch_correct_num, batch_total_num = calculate_acc(logits, labels, ignore_index=args.ignore_index)
        epoch_correct_list.append(batch_correct_num)
        epoch_total_list.append(batch_total_num)

        # Guard against division by zero on batches with no countable tokens.
        batch_acc = batch_correct_num / batch_total_num if batch_total_num else 0.0
        tqdm.write('epoch: {}, step: {}, loss: {:.4f}, acc: {:.4f}'.format(epoch + 1, step + 1, loss.item(), batch_acc))

        # 1.6 Periodically persist running loss/acc (every 20 steps or at the last step).
        if (step + 1) % 20 == 0 or step == len(train_dataloader) - 1:
            stats = {
                "loss": epoch_loss_list,
                "correct": epoch_correct_list,
                "total": epoch_total_list
            }
            stats_save_path = os.path.join(args.log_path, "training_stats实时记录.json")
            with open(stats_save_path, 'w') as f:
                json.dump(stats, f)

    # Flush gradients left over when the epoch length is not divisible by
    # gradient_accumulation_steps; otherwise they would carry over un-applied.
    if pending_grads:
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

    # ======================================== #
    # 2 Save the training log after the epoch completes
    # ======================================== #
    stats_save_path = os.path.join(args.log_path, f"training_stats_epoch_{epoch + 1}.json")
    stats = {
        "loss": epoch_loss_list,
        "correct": epoch_correct_list,
        "total": epoch_total_list,
        "step": list(range(1, len(epoch_loss_list) + 1))
    }
    with open(stats_save_path, 'w') as f:
        json.dump(stats, f)
    print(f"✅ Training statistics saved to: {stats_save_path}")



def train(model: GPT2LMHeadModel, args: ParameterConfig, train_dataloader: DataLoader):
    """Training driver: build optimizer/scheduler, resume if possible, loop epochs."""

    # Total number of optimizer updates across the whole run (accumulation-aware).
    total_updates = len(train_dataloader) // args.gradient_accumulation_steps * args.epochs

    # Optimizer and linear warmup/decay learning-rate schedule.
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, eps=args.eps)
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=args.warmup_steps,
        num_training_steps=total_updates,
    )

    # Resume from the most recent checkpoint when one exists.
    model, optimizer, scheduler, start_epoch = load_checkpoint(model, optimizer, scheduler, args)

    for epoch_idx in range(start_epoch, args.epochs):
        train_epoch(model, train_dataloader, optimizer, scheduler, epoch_idx, args)
        save_checkpoint(model, optimizer, scheduler, epoch_idx, args)


# ======================================== #
# 6 Main entry point
# ======================================== #
def main():
    """Entry point: build tokenizer and model, prepare dirs/data, start training."""
    params = ParameterConfig()
    tokenizer = BertTokenizerFast(params.vocab_path)
    print("✅ Tokenizer loaded successfully.")

    # Create output directories. makedirs also creates missing parent
    # directories, where os.mkdir would raise FileNotFoundError.
    if not os.path.exists(params.save_model_path):
        os.makedirs(params.save_model_path)
        print(f"✅ {params.save_model_path} created successfully.")
    if not os.path.exists(params.log_path):
        os.makedirs(params.log_path)
        print(f"✅ {params.log_path} created successfully.")

    # Initialize the model: load a pretrained checkpoint when configured,
    # otherwise build a fresh GPT-2 from the JSON config file.
    if params.pretrained_model:
        model = GPT2LMHeadModel.from_pretrained(params.pretrained_model)
        print("✅ Pretrained model loaded successfully.")
    else:
        model_config = GPT2Config.from_json_file(params.config_json)
        model = GPT2LMHeadModel(config=model_config)
        print("✅ Model initialized from config.")

    model = model.to(params.device)
    # Explicit validation instead of `assert`, which is stripped under `python -O`.
    if model.config.vocab_size != tokenizer.vocab_size:
        raise ValueError(
            f"model vocab_size ({model.config.vocab_size}) does not match "
            f"tokenizer vocab_size ({tokenizer.vocab_size})"
        )

    # Load the training data.
    chat_dataloader = get_chat_dataloader(params.chat_path)
    print(f"✅ Chat dataset:{len(chat_dataloader)} loaded successfully.")

    # Start training.
    train(model, params, chat_dataloader)


# Standard script entry guard: run training only when executed directly.
if __name__ == '__main__':
    main()
