# train.py
"""
TinyLLaMA / Qwen2.5 微调示例
适合显存 <= 6GB 的显卡
使用 HuggingFace Transformers + PEFT（LoRA）微调
"""

import os

from datasets import load_dataset
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments

# -----------------------------
# 1. Configuration
# -----------------------------
MODEL_NAME = "NousResearch/TinyLLaMA-1b"  # HF Hub model id; can be swapped for e.g. Qwen2.5-0.5B
DATA_PATH = "./data/superstar_dataset.jsonl"  # path to the training data (JSONL)
OUTPUT_DIR = "./tinyllama_model"  # where checkpoints and the final model are written

# -----------------------------
# 2. Load the tokenizer
# -----------------------------
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

# TinyLLaMA / Qwen2.5 need explicit padding configuration
tokenizer.padding_side = "right"
tokenizer.pad_token = tokenizer.eos_token  # reuse eos_token when no pad_token is defined

# -----------------------------
# 3. Load the model
# -----------------------------
# load_in_8bit=True: 8-bit quantization to reduce VRAM usage
# device_map="auto": automatically place layers on GPU/CPU
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    load_in_8bit=True,
    device_map="auto"
)

# Required before attaching LoRA to a quantized (k-bit) model: casts layer
# norms to fp32 and enables input gradients so adapter training is stable.
model = prepare_model_for_kbit_training(model)

# -----------------------------
# 4. Configure LoRA
# -----------------------------
# LoRA trains only small low-rank adapter matrices, keeping memory usage low
lora_config = LoraConfig(
    r=8,  # LoRA rank, controls the adapter parameter count
    lora_alpha=32,  # scaling factor applied to the adapter updates
    target_modules=["q_proj", "v_proj"],  # key attention projections in TinyLLaMA
    lora_dropout=0.1,  # dropout on the adapter path
    bias="none",
    task_type="CAUSAL_LM"
)

model = get_peft_model(model, lora_config)

# -----------------------------
# 5. Load the dataset
# -----------------------------
# JSONL format, one {"prompt": "xxx", "completion": "yyy"} object per line
dataset = load_dataset("json", data_files=DATA_PATH)

# -----------------------------
# 6. Tokenization 函数
# -----------------------------
def tokenize(batch):
    """Tokenize a batch of prompt/completion pairs for causal-LM training.

    Called via ``dataset.map(..., batched=True)``, so ``batch["prompt"]`` and
    ``batch["completion"]`` are lists of strings.  Each pair is concatenated
    element-wise (the original ``list + list`` concatenated the *lists*,
    turning N pairs into 2N unrelated examples), and ``labels`` are added so
    the Trainer can compute the language-modeling loss.
    """
    # Join prompt and completion per example, not list-concatenation.
    texts = [p + c for p, c in zip(batch["prompt"], batch["completion"])]
    # Truncate/pad to 128 tokens to keep VRAM usage low.
    tokenized = tokenizer(texts, truncation=True, max_length=128, padding="max_length")
    # Causal LM: labels mirror input_ids (the model shifts them internally).
    tokenized["labels"] = [ids.copy() for ids in tokenized["input_ids"]]
    return tokenized

# Tokenize the whole dataset.  Drop the raw string columns here: since the
# training args use remove_unused_columns=False, the "prompt"/"completion"
# strings would otherwise reach the data collator and crash it — the collator
# can only batch numeric fields (input_ids, attention_mask, ...).
tokenized_dataset = dataset.map(
    tokenize,
    batched=True,
    remove_columns=dataset["train"].column_names,
)
train_dataset = tokenized_dataset["train"]  # this JSON dataset only has a "train" split

# -----------------------------
# 7. Training arguments
# -----------------------------
training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,           # where checkpoints are saved
    per_device_train_batch_size=1,   # tiny batch to fit in low VRAM
    gradient_accumulation_steps=8,   # accumulate gradients -> effective batch size 8
    learning_rate=2e-4,              # learning rate
    num_train_epochs=3,              # number of training epochs
    fp16=True,                        # mixed-precision training to cut memory use
    logging_steps=5,                 # log every 5 steps
    save_steps=20,                   # checkpoint every 20 steps
    save_total_limit=2,              # keep at most two checkpoints on disk
    remove_unused_columns=False,     # keep all dataset columns
    report_to="none",                # no wandb / tensorboard integration
)

# -----------------------------
# 8. Build the Trainer
# -----------------------------
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset
)

# -----------------------------
# 9. Run training
# -----------------------------
trainer.train()

# -----------------------------
# 10. Save the results
# -----------------------------
# Model was wrapped by get_peft_model, so save_pretrained writes the LoRA
# adapter weights (not the full base model).
model.save_pretrained(OUTPUT_DIR)
tokenizer.save_pretrained(OUTPUT_DIR)

print(f"模型微调完成，已保存到 {OUTPUT_DIR}")
