import json
import torch
from datasets import Dataset, DatasetDict
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
    BitsAndBytesConfig
)
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training


# Dataset loading and preprocessing
def load_and_preprocess_data(file_path):
    """Load the JSON dataset at *file_path* and wrap it as a DatasetDict.

    The file is expected to contain 'train' and 'test' lists whose items have
    keys 'nums', 'target', and 'solution'. Each item is rendered into a single
    instruction-style training string stored under the 'text' column.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        raw = json.load(f)

    def to_text(item):
        # A compact prompt keeps the tokenized sequence short.
        joined = ", ".join(str(n) for n in item['nums'])
        prompt = f"用{joined}得到{item['target']}的数学表达式："
        # Fall back to a placeholder answer when no solution is given.
        answer = item['solution'] or "未知"
        return f"<s>[INST] {prompt} [/INST] {answer}</s>"

    splits = {
        split: Dataset.from_dict({'text': [to_text(e) for e in raw[split]]})
        for split in ('train', 'test')
    }
    return DatasetDict(splits)


# Model and tokenizer loading
def load_model_and_tokenizer(model_name):
    """Load *model_name* as a 4-bit quantized causal LM with LoRA adapters.

    Returns a ``(peft_model, tokenizer)`` pair ready for k-bit training.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Causal LMs often ship without a pad token; reuse EOS for padding.
    tokenizer.pad_token = tokenizer.eos_token

    quant_cfg = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,        # double quantization saves extra VRAM
        bnb_4bit_quant_type="nf4",             # NF4 is well suited to training
        bnb_4bit_compute_dtype=torch.float16,  # compute precision
    )

    base = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="auto",
        torch_dtype=torch.float16,
        quantization_config=quant_cfg,
        low_cpu_mem_usage=True,  # keep host-RAM usage down while loading
    )

    # Prepare the quantized model for gradient-based fine-tuning.
    base = prepare_model_for_kbit_training(base)

    peft_model = get_peft_model(
        base,
        LoraConfig(
            r=8,
            lora_alpha=32,
            target_modules=["q_proj", "k_proj", "v_proj"],
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM",
        ),
    )
    peft_model.print_trainable_parameters()
    return peft_model, tokenizer

# Tokenization helper
def preprocess_function(examples, tokenizer, max_length=64):
    """Tokenize the 'text' column of a batch, padded/truncated to *max_length*."""
    tokenize_kwargs = {
        "truncation": True,
        "max_length": max_length,
        "padding": "max_length",
        "return_tensors": "pt",
    }
    return tokenizer(examples["text"], **tokenize_kwargs)

# Main training entry point
def train():
    """Fine-tune Qwen2.5-0.5B with 4-bit QLoRA on the local dataset.

    Loads data from ``data.json``, tokenizes it, runs Trainer with epoch-level
    evaluation/saving, and writes the LoRA adapter to disk at the end.
    """
    dataset = load_and_preprocess_data("data.json")
    model, tokenizer = load_model_and_tokenizer("Qwen/Qwen2.5-0.5B")

    # Tokenize both splits; drop the raw text column afterwards.
    tokenized = dataset.map(
        lambda batch: preprocess_function(batch, tokenizer),
        batched=True,
        remove_columns=["text"],
    )

    # Training configuration (memory-lean: small batches + accumulation,
    # paged 8-bit optimizer, gradient checkpointing, fp16).
    args = TrainingArguments(
        output_dir="./qwen-lora-results-small",
        num_train_epochs=2,
        per_device_train_batch_size=2,
        per_device_eval_batch_size=1,
        gradient_accumulation_steps=4,
        warmup_steps=20,
        logging_dir="./logs",
        logging_steps=5,
        eval_strategy="epoch",
        save_strategy="epoch",
        load_best_model_at_end=True,
        learning_rate=3e-4,
        weight_decay=0.01,
        fp16=True,
        report_to="none",
        optim="paged_adamw_8bit",
        gradient_checkpointing=True,
    )

    # Causal-LM collator (mlm=False): labels are shifted input_ids.
    collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

    trainer = Trainer(
        model=model,
        args=args,
        train_dataset=tokenized["train"],
        eval_dataset=tokenized["test"],
        data_collator=collator,
    )

    print("start..")
    trainer.train()

    # Persist only the LoRA adapter weights (not the full base model).
    model.save_pretrained("qwen-lora-adapter-small")
    print("finish")

# Run fine-tuning only when executed as a script (not on import).
if __name__ == "__main__":
    train()
