# -*- coding: utf-8 -*-
# time: 2025/4/9 14:48
# file: data_ready.py
# author: hanson
"""
数据集准备
# 安装依赖
pip install langchain==0.3.0 torch==2.3.0 transformers==4.40.0 modelscope==1.13.0 peft==0.10.0
如果ImportError: cannot import name 'get_metadata_patterns' from 'datasets.data_files' 是 datasets版本过高
pip install datasets==3.1.0
数据集描述：https://www.modelscope.cn/datasets/AI-ModelScope/alpaca-gpt4-data-zh
 /mnt/workspace/.cache/modelscope/models/qwen/qwen2-1.5b-instruct

"""
from datasets import load_dataset
from modelscope import AutoTokenizer, AutoModelForCausalLM
from peft import LoraConfig, get_peft_model
import torch

# 1. Load the base model and its tokenizer.
#model_id = "qwen/qwen2-1.5b-instruct"
model_id = r"E:\soft\model\qwen\Qwen\Qwen2___5-0___5B-Instruct"
# AutoTokenizer.from_pretrained loads the pretrained tokenizer, which
# converts raw text into the numeric token ids the model understands.
tokenizer = AutoTokenizer.from_pretrained(
    pretrained_model_name_or_path=model_id,
    trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # load weights in bfloat16 to halve memory use
    device_map="auto",           # let accelerate place layers on available devices
    trust_remote_code=True
)

# 2. Attach a LoRA adapter so only a small set of low-rank weights is trained.
peft_config = LoraConfig(
    r=8,                 # rank of the low-rank update matrices
    lora_alpha=32,       # scaling factor applied to the LoRA update
    target_modules=["q_proj", "k_proj", "v_proj"],  # attention projections to adapt
    lora_dropout=0.05,
    bias="none",         # do not train bias parameters
    task_type="CAUSAL_LM"
)
model = get_peft_model(model, peft_config)
# Local CSV file with the training data (pediatrics dataset).
data_id=r"F:\temp\pediatrics-dataset\train.csv"

# Load the training dataset.
def load_data(num_samples=10):
    """Load the local CSV dataset and keep at most ``num_samples`` rows.

    Args:
        num_samples: Maximum number of examples to return. Defaults to 10,
            matching the original quick-test behaviour; raise it for a real
            training run.

    Returns:
        A ``datasets.Dataset`` with at most ``num_samples`` examples.
    """
    dataset = load_dataset("csv", data_files=data_id, split="train")
    # Clamp to the dataset size so a short CSV does not raise IndexError.
    return dataset.select(range(min(num_samples, len(dataset))))

# 2. 数据预处理
def preprocess_data(tokenizer, dataset):
    def format_example(ex):
        text = f"Instruction: {ex['instruction']}\nInput: {ex['input']}\nOutput: {ex['output']}"
        return {"text": text}

    formatted = dataset.map(format_example, remove_columns=dataset.column_names)
    return formatted.map(
        lambda x: tokenizer(x["text"], truncation=True,
                            #padding="max_length",  # 显式填充
                            padding=True,  # 动态填充
                            return_tensors="pt", # 返回PyTorch张量
                            max_length=512),
        batched=True,
        remove_columns=["text"]  # 移除原始文本列
    )

# 4. Data collator: batches tokenized examples, pads them dynamically and
# builds the labels tensor (this resolves the label_names issue).
from transformers import DataCollatorForLanguageModeling
data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=False  # causal LM: next-token prediction, not masked-LM
)

# 5. 训练配置
from transformers import TrainingArguments, Trainer
# 5. Training.
def train():
    """Fine-tune the LoRA-adapted model on the loaded dataset.

    Uses the module-level ``model``/``tokenizer``/``data_collator`` and
    saves the trained adapter plus tokenizer to ``./qwen2-0.5b-doctor``.
    """
    dataset = load_data()
    tokenized_data = preprocess_data(tokenizer, dataset)

    trainer = Trainer(
        model=model,
        args=TrainingArguments(
            output_dir="./qwen2-0.5b-doctor",
            per_device_train_batch_size=4,
            gradient_accumulation_steps=2,  # effective batch size: 4 * 2 = 8
            learning_rate=2e-5,
            num_train_epochs=1,
            logging_steps=10,
            save_strategy="epoch",
            # The model weights were loaded with torch_dtype=torch.bfloat16;
            # fp16 mixed precision fails to unscale bf16 gradients, so use
            # bf16 to match.
            # NOTE(review): bf16 needs Ampere-or-newer GPU support — confirm.
            bf16=True,
            remove_unused_columns=False,
            evaluation_strategy="no"  # no evaluation set is wired up yet
        ),
        # Reuse the module-level collator instead of constructing a duplicate.
        data_collator=data_collator,
        train_dataset=tokenized_data,
    )
    trainer.train()

    # Persist the LoRA adapter weights and the tokenizer.
    model.save_pretrained("./qwen2-0.5b-doctor")
    tokenizer.save_pretrained("./qwen2-0.5b-doctor")

# Script entry point: run fine-tuning when executed directly.
if __name__ == "__main__":
    train()

