import os
import torch
from datasets import load_dataset, DatasetDict
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
    EarlyStoppingCallback,
)

# Configuration constants
MODEL_NAME = "../Qwen2.5-1.5B"
TRAIN_DATA_PATH = "../dataset/converted_data.json"  # training set path
VAL_DATA_PATH = "./dataset/converted_val.json"  # validation set path; NOTE(review): "./dataset" here vs "../dataset" above — confirm this difference is intentional
OUTPUT_DIR = "./qwen_finetuned"
VAL_SPLIT_RATIO = 0.1  # fraction carved off the training set when no separate validation file exists


# Modification 1: data loading with a validation set
def load_datasets():
    """Load the training and validation datasets.

    Uses the standalone validation file when it exists; otherwise carves a
    validation split of ``VAL_SPLIT_RATIO`` off the training data.

    Returns:
        DatasetDict with "train" and "validation" splits.
    """
    train = load_dataset("json", data_files=TRAIN_DATA_PATH, split="train")
    # Explicit existence check instead of a bare ``except:``, which would
    # also swallow genuine parse/IO errors from the *training* file.
    if os.path.exists(VAL_DATA_PATH):
        val = load_dataset("json", data_files=VAL_DATA_PATH, split="train")
        return DatasetDict({"train": train, "validation": val})
    # No standalone validation file: split one off the training set.
    split = train.train_test_split(test_size=VAL_SPLIT_RATIO, shuffle=True)
    return DatasetDict({"train": split["train"], "validation": split["test"]})


# Modification 2: preprocessing shared by train and validation splits
def preprocess_dataset(dataset):
    """Format and tokenize both the train and validation splits.

    Args:
        dataset: DatasetDict whose rows carry a "messages" field
            (chat-format list of role/content dicts) — assumed from the
            chat-template call; confirm against the JSON files.

    Returns:
        DatasetDict with tokenized columns ("input_ids", "attention_mask").
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    # The base model has no dedicated pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token

    def format_chat_template(examples):
        # Render each conversation with the model's chat template.
        return {
            "text": tokenizer.apply_chat_template(
                examples["messages"], tokenize=False, add_generation_prompt=False
            )
        }

    def tokenize_function(examples):
        # No ``return_tensors="pt"`` here: Dataset.map stores Arrow-backed
        # python lists, so requesting torch tensors inside a batched map is
        # wasted work at best and an error at worst.  Padding is also left
        # to the data collator (dynamic per-batch padding via
        # ``pad_to_multiple_of``) instead of padding every row to 1024.
        return tokenizer(
            examples["text"],
            max_length=1024,
            truncation=True,
        )

    # Format first, then tokenize; drop intermediate columns at each step.
    return dataset.map(
        format_chat_template,
        batched=True,
        batch_size=2000,
        remove_columns=dataset["train"].column_names,
    ).map(
        tokenize_function,
        batched=True,
        batch_size=2000,
        remove_columns=["text"],
    )


# Modification 3: training arguments with periodic validation
training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,  # evaluation batch size
    evaluation_strategy="steps",  # evaluate every eval_steps; NOTE(review): renamed to ``eval_strategy`` in newer transformers — confirm installed version accepts this name
    eval_steps=200,  # evaluate every 200 training steps
    gradient_accumulation_steps=2,
    learning_rate=3e-5,
    num_train_epochs=2,
    logging_steps=20,
    bf16=True,
    tf32=True,
    optim="adamw_torch_fused",
    save_strategy="steps",
    save_steps=200,
    report_to="tensorboard",
    max_grad_norm=0.3,
    warmup_ratio=0.05,
    gradient_checkpointing=True,
    load_best_model_at_end=True,  # reload the best checkpoint when training ends
    metric_for_best_model="eval_loss",  # pick the best model by validation loss
    greater_is_better=False,  # lower eval_loss is better
)


# Modification 4: Trainer initialized with a validation set
def main():
    """End-to-end fine-tuning: load data, train, save, report eval loss."""
    # Load raw train/validation data
    raw_datasets = load_datasets()

    # Chat-template formatting + tokenization for both splits
    tokenized_datasets = preprocess_dataset(raw_datasets)

    # Initialize the model
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        attn_implementation="flash_attention_2",
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        use_cache=False,  # KV cache is incompatible with gradient checkpointing
        # NOTE(review): ``device_map="auto"`` shards the model for inference
        # and can conflict with Trainer's own device placement — confirm on
        # the target (multi-)GPU setup.
        device_map="auto",
    )

    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)
    tokenizer.pad_token = tokenizer.eos_token

    # Causal-LM collator: mlm=False makes labels a copy of input_ids
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm=False, pad_to_multiple_of=64
    )

    # Create the Trainer with validation and early stopping
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["validation"],  # validation split
        data_collator=data_collator,
        callbacks=[EarlyStoppingCallback(early_stopping_patience=3)],  # stop after 3 non-improving evals
    )

    # Train, then persist model and tokenizer together
    trainer.train()
    trainer.save_model(OUTPUT_DIR)
    tokenizer.save_pretrained(OUTPUT_DIR)

    # Report the final validation loss
    eval_results = trainer.evaluate()
    print(f"最终验证集loss: {eval_results['eval_loss']:.4f}")


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
