# -*- coding: utf-8 -*-
"""
完整 LoRA 微调 + 推理示例（安全批量处理）
适配 Qwen3-1.7B，本地 JSON 数据集
"""

from datasets import load_dataset
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments, pipeline
import torch

# =========================
# 1. Dataset loading
# =========================
# Local JSON file with instruction / input / output records.
dataset_path = r"/Test/json/cleaned_output.json"
# BUG FIX: the first argument of load_dataset must be the builder name
# ("json"), not a filesystem path such as "../json"; the actual file
# location is passed via data_files.
dataset = load_dataset("json", data_files=dataset_path)
print("数据集加载完成：", dataset)

# =========================
# 2. Model loading + LoRA configuration
# =========================
model_name = r"D:\models\Qwen3\Qwen3-1.7B"

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)

# NOTE(review): passing load_in_4bit directly is deprecated in recent
# transformers releases in favor of quantization_config=BitsAndBytesConfig(...);
# kept as-is here — confirm against the installed transformers version.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    load_in_4bit=True,       # saves GPU memory (requires bitsandbytes)
    trust_remote_code=True
)

# FIX: a k-bit quantized base model must be prepared before LoRA adapters
# are attached, otherwise gradients do not flow correctly (this enables
# input grads, casts norm layers to fp32 and disables the KV cache).
model = prepare_model_for_kbit_training(model)

# LoRA configuration
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,                                 # rank of the LoRA update matrices
    lora_alpha=32,                        # LoRA scaling factor
    target_modules=["q_proj", "v_proj"],  # attention projections used by Qwen3
    lora_dropout=0.05,
    bias="none"
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # sanity check: only LoRA params are trainable

# =========================
# 3️⃣ 数据预处理（安全批量处理）
# =========================
def tokenize_fn(batch):
    """Tokenize one batch of instruction/input/output records.

    Builds each prompt as ``"<instruction>\n<input>"``, tokenizes prompts
    and targets separately to a fixed length of 256, and uses the target
    token ids as labels with padding positions masked to ``-100`` so the
    Trainer's cross-entropy loss ignores them.

    NOTE(review): the usual causal-LM recipe tokenizes prompt+response
    together and masks only the prompt portion of the labels; here labels
    come solely from the response — confirm this matches the intended
    training objective.
    """
    # Determine the batch size from any present column, then fall back to
    # empty strings for columns missing from the dataset.
    batch_size = len(next(iter(batch.values())))
    instructions = batch.get("instruction", [""] * batch_size)
    inputs = batch.get("input", [""] * batch_size)
    outputs = batch.get("output", [""] * batch_size)

    # Prompt = instruction and input joined by a newline.
    full_inputs = [f"{inst}\n{inp}" for inst, inp in zip(instructions, inputs)]

    tokenized_inputs = tokenizer(
        full_inputs,
        truncation=True,
        padding="max_length",
        max_length=256
    )
    tokenized_outputs = tokenizer(
        outputs,
        truncation=True,
        padding="max_length",
        max_length=256
    )

    # FIX: replace pad-token ids in the labels with -100 so padding does
    # not contribute to the training loss.
    pad_id = tokenizer.pad_token_id
    tokenized_inputs["labels"] = [
        [(tok if tok != pad_id else -100) for tok in seq]
        for seq in tokenized_outputs["input_ids"]
    ]
    return tokenized_inputs

# Apply tokenize_fn over the whole dataset in batches; this adds the
# input_ids / attention_mask / labels columns used by the Trainer.
tokenized_dataset = dataset.map(tokenize_fn, batched=True)
print("数据集 tokenization 完成。")

# =========================
# 4. LoRA fine-tuning
# =========================
# FIX: the original wrote checkpoints to "../qwen3-lora" but saved the
# final adapter to "./qwen3-lora" (and the final print claimed the
# latter); unify everything on one directory.
output_dir = "./qwen3-lora"

training_args = TrainingArguments(
    per_device_train_batch_size=4,
    gradient_accumulation_steps=8,   # effective batch size = 4 * 8 = 32
    warmup_steps=50,
    num_train_epochs=3,
    learning_rate=2e-4,
    fp16=True,
    logging_steps=10,
    save_steps=500,
    output_dir=output_dir,
    save_total_limit=3               # keep only the 3 most recent checkpoints
)

# The loaded dataset may be a DatasetDict with a "train" split or a bare
# Dataset — handle both.
train_data = tokenized_dataset["train"] if "train" in tokenized_dataset else tokenized_dataset

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_data,
    tokenizer=tokenizer
)

print("开始训练 LoRA 模型...")
trainer.train()
print("训练完成。")

# Save the LoRA adapter weights together with the tokenizer so inference
# can reload both from a single directory.
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
print("LoRA 权重保存完成：./qwen3-lora")

