# -*- coding: utf-8 -*-
import torch
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForCausalLM,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from peft import LoraConfig, get_peft_model

# 1. Load the dataset
def load_data():
    """Load a small subset (first 100 rows) of the Chinese medical-dialogue dataset.

    Returns:
        datasets.Dataset: 100 examples with `instruction`/`input`/`output` columns.
    """
    dataset = load_dataset('erhwenkuo/medical_dialogue-chinese-zhtw', split='train')
    # Keep only 100 examples so a fine-tuning smoke test runs quickly.
    # (Removed dead commented-out code and a stale comment that claimed 2000 rows.)
    return dataset.select(range(100))

# 2. Data preprocessing
def preprocess_data(tokenizer, dataset):
    """Render each row as a single prompt string, then tokenize it.

    Args:
        tokenizer: callable tokenizer; invoked with a batch of texts,
            truncation enabled and max_length=512.
        dataset: dataset whose rows carry `instruction`, `input`, `output`.

    Returns:
        The dataset with a `text` column plus the tokenizer's output columns.
    """
    def to_prompt(example):
        joined = (
            f"Instruction: {example['instruction']}\n"
            f"Input: {example['input']}\n"
            f"Output: {example['output']}"
        )
        return {"text": joined}

    def tokenize_batch(batch):
        return tokenizer(batch["text"], truncation=True, max_length=512)

    # First pass: build the prompt text and drop the raw columns.
    with_text = dataset.map(to_prompt, remove_columns=["instruction", "input", "output"])
    # Second pass: batched tokenization (truncated to 512 tokens).
    return with_text.map(tokenize_batch, batched=True)

# 3. Load the base model and tokenizer
model_id = "Qwen/Qwen2.5-0.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # weights loaded in bf16 — training args below should use bf16, not fp16
    device_map="auto",
    trust_remote_code=True
)

# 4. Configure LoRA adapters
# NOTE(review): target_modules names assume Qwen2's attention projection layers
# are called q_proj/k_proj/v_proj — verify against the loaded architecture.
peft_config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=["q_proj", "k_proj", "v_proj"],  # attention projections to adapt
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM"
)
model = get_peft_model(model, peft_config)

# 5. Training
def train():
    """Fine-tune the LoRA-wrapped model and save the adapter plus tokenizer."""
    dataset = load_data()
    tokenized_data = preprocess_data(tokenizer, dataset)
    # Hold out 10% of the tokenized data as an evaluation split.
    split_datasets = tokenized_data.train_test_split(test_size=0.1)

    # BUG FIX: the splits were previously run through preprocess_data a second
    # time, which fails because the instruction/input/output columns were
    # already removed by the first pass. The splits are already tokenized —
    # use them directly.
    train_tokenized = split_datasets["train"]
    eval_tokenized = split_datasets["test"]

    trainer = Trainer(
        model=model,
        args=TrainingArguments(
            output_dir="./qwen2-0.5b-finetuned2",
            per_device_train_batch_size=4,
            gradient_accumulation_steps=2,
            learning_rate=2e-5,
            num_train_epochs=1,
            logging_steps=10,
            save_strategy="epoch",
            # BUG FIX: the base model is loaded in torch.bfloat16; fp16=True
            # makes the fp16 GradScaler try to unscale bf16 gradients and
            # crash. Match the model dtype with bf16 mixed precision.
            bf16=True,
            remove_unused_columns=False,
            evaluation_strategy="steps",  # periodic evaluation; requires eval_dataset
            eval_steps=100,
        ),
        # Causal-LM collation: mlm=False pads batches and copies input_ids to labels.
        data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
        train_dataset=train_tokenized,
        eval_dataset=eval_tokenized,
    )
    trainer.train()

    # Save only the LoRA adapter weights and the tokenizer files.
    model.save_pretrained("./qwen2-0.5b-finetuned2")
    tokenizer.save_pretrained("./qwen2-0.5b-finetuned2")

if __name__ == "__main__":
    # Script entry point: run fine-tuning when executed directly.
    train()