import torch
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from peft import LoraConfig
from trl import SFTTrainer

# --- 1. Load the prepared training dataset (one JSON object per line) ---
DATA_FILE = "sichuan_travel_dataset.jsonl"
dataset = load_dataset("json", data_files=DATA_FILE, split="train")

# --- 2. Configure the base model and tokenizer ---
BASE_MODEL = "Qwen/Qwen2-1.5B-Instruct"

# QLoRA setup: load the base weights in 4-bit NF4 to save GPU memory.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumes a bf16-capable GPU — confirm
    bnb_4bit_use_double_quant=False,
)

# Load the quantized base model, letting HF place layers across devices.
model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL,
    quantization_config=quant_config,
    device_map="auto",
)
# Disable the KV cache while training (it is only useful for generation).
model.config.use_cache = False

# Load the tokenizer and reuse the EOS token for padding.
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"  # pad on the right for causal-LM training

# --- 3. LoRA adapter configuration ---
# Attach low-rank adapters to every attention and MLP projection layer.
ATTENTION_PROJECTIONS = ["q_proj", "k_proj", "v_proj", "o_proj"]
MLP_PROJECTIONS = ["gate_proj", "up_proj", "down_proj"]

peft_config = LoraConfig(
    r=8,               # adapter rank
    lora_alpha=16,     # scaling factor (effective scale = alpha / r)
    lora_dropout=0.1,
    bias="none",       # leave all bias terms frozen
    task_type="CAUSAL_LM",
    target_modules=ATTENTION_PROJECTIONS + MLP_PROJECTIONS,
)

# --- 4. Training hyperparameters ---
training_args = TrainingArguments(
    # Output and checkpointing
    output_dir="./sichuan_bot_results",
    save_steps=50,
    logging_steps=10,
    # Schedule: 3 full epochs (max_steps=-1 means "do not cap by step count")
    num_train_epochs=3,
    max_steps=-1,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=1,
    # Optimizer: paged AdamW pairs well with a 4-bit quantized base model
    optim="paged_adamw_32bit",
    learning_rate=2e-4,
    lr_scheduler_type="constant",
    warmup_ratio=0.03,
    weight_decay=0.001,
    max_grad_norm=0.3,
    # Precision: assumes the GPU supports bf16 — confirm before running
    fp16=False,
    bf16=True,
    # Bucket samples of similar length together to reduce padding waste
    group_by_length=True,
)

# --- 5. Initialize the trainer and run fine-tuning ---
# Pass the tokenizer configured above (pad token + right padding) to the
# trainer; otherwise SFTTrainer loads its own fresh tokenizer and that
# setup is silently ignored. `processing_class` is the TRL >= 0.12 name
# for the former `tokenizer` argument — on older TRL use `tokenizer=`.
trainer = SFTTrainer(
    model=model,
    train_dataset=dataset,
    peft_config=peft_config,
    processing_class=tokenizer,
    args=training_args,
)

print("🚀 开始微调四川旅游推荐机器人...")
trainer.train()

# --- 6. Save the fine-tuned LoRA adapter weights ---
# Only the adapter (plus config) is written here, not the full base model.
output_model_dir = "./sichuan_travel_bot_finetuned"
trainer.save_model(output_model_dir)

print(f"🎉 微调完成！模型已保存至 {output_model_dir}")