# -*- coding: utf-8 -*-
# time: 2025/4/10 14:51
# file: finetune_qwen.py
# author: hanson
import torch
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from peft import LoraConfig, get_peft_model
from datasets import Dataset
from modelscope.msdatasets import MsDataset


# 1. Load the training dataset
def load_data(num_samples=10):
    """Load the ModelScope 'alpaca-gpt4-data-zh' instruction dataset.

    Args:
        num_samples: Number of examples to keep from the train split.
            Defaults to 10 — a small smoke-test subset. (The original
            inline comment claimed 2000 examples, but the code selected 10;
            the count is now an explicit parameter.)

    Returns:
        A Hugging Face ``datasets.Dataset`` with ``num_samples`` rows.
    """
    dataset = MsDataset.load(
        'AI-ModelScope/alpaca-gpt4-data-zh',
        split='train'
    ).to_hf_dataset()
    return dataset.select(range(num_samples))


# 2. Data preprocessing
def preprocess_data(tokenizer, dataset):
    """Render each example as an instruction prompt, then tokenize.

    Args:
        tokenizer: Callable tokenizer; invoked as
            ``tokenizer(texts, truncation=True, max_length=512)``.
        dataset: A ``datasets.Dataset`` with ``instruction``, ``input``
            and ``output`` columns.

    Returns:
        The tokenized dataset (original columns replaced by ``text`` plus
        the tokenizer's output fields, e.g. ``input_ids``).
    """
    def to_prompt(example):
        # Single "Instruction/Input/Output" prompt string per example.
        return {
            "text": f"Instruction: {example['instruction']}\nInput: {example['input']}\nOutput: {example['output']}"
        }

    def tokenize_batch(batch):
        # Truncate to 512 tokens; padding is deferred to the data collator.
        return tokenizer(batch["text"], truncation=True, max_length=512)

    prompts = dataset.map(to_prompt, remove_columns=dataset.column_names)
    return prompts.map(tokenize_batch, batched=True)


# 3. Load the base model and tokenizer.
# Original ModelScope hub id, kept for reference:
#model_id = "qwen/qwen2-0.5b-instruct"
model_id = r"E:\soft\model\qwen\Qwen\Qwen2___5-0___5B-Instruct"  # local Windows path to Qwen2.5-0.5B-Instruct
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # load weights in bf16 to halve memory
    device_map="auto",           # let accelerate place layers on available devices
    trust_remote_code=True
)

# 4. 配置LoRA
peft_config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=["q_proj", "k_proj", "v_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM"
)
model = get_peft_model(model, peft_config)


# 5. Training
def train():
    """Fine-tune the LoRA-wrapped model and save the merged weights.

    Loads and tokenizes the dataset, runs one epoch of causal-LM training,
    then merges the LoRA adapters into the base model and saves model +
    tokenizer to ``./qwen2-0.5b-finetuned2``.
    """
    dataset = load_data()
    tokenized_data = preprocess_data(tokenizer, dataset)

    trainer = Trainer(
        model=model,
        args=TrainingArguments(
            output_dir="./qwen2-0.5b-finetuned2",
            per_device_train_batch_size=4,
            gradient_accumulation_steps=2,
            learning_rate=2e-5,
            num_train_epochs=1,
            logging_steps=10,
            save_strategy="epoch",
            # The model was loaded in torch.bfloat16; fp16=True would make the
            # GradScaler try to unscale non-fp16 gradients and fail. Use bf16.
            bf16=True,
            remove_unused_columns=False
        ),
        data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
        train_dataset=tokenized_data,
    )
    trainer.train()

    # Merge the LoRA adapters into the base weights, then save the full model.
    # (PEFT's save_pretrained has no `merge_and_unload` kwarg — merging is a
    # separate method call; the original code saved an unmerged adapter.)
    merged_model = model.merge_and_unload()
    merged_model.save_pretrained("./qwen2-0.5b-finetuned2")
    tokenizer.save_pretrained("./qwen2-0.5b-finetuned2")


# Script entry point: run fine-tuning only when executed directly.
if __name__ == "__main__":
    train()