# -*- coding: utf-8 -*-
# time: 2025/4/9 14:48
# file: data_ready.py
# author: hanson
"""
数据集准备
# 安装依赖
pip install langchain==0.3.0 torch==2.3.0 transformers==4.40.0 modelscope==1.13.0 peft==0.10.0
如果ImportError: cannot import name 'get_metadata_patterns' from 'datasets.data_files' 是 datasets版本过高
pip install datasets==3.1.0
数据集描述：https://www.modelscope.cn/datasets/AI-ModelScope/alpaca-gpt4-data-zh

"""
from modelscope.msdatasets import MsDataset
from langchain_core.documents import Document

# 加载ModelScope中的示例数据集（以Alpaca中文数据集为例）
dataset = MsDataset.load(
    r'F:\temp\alpaca-gpt4-data-zh\train.csv',
    subset_name='default',
    split='train',
    cache_dir="./cache" # 指定缓存目录
).to_hf_dataset()
# 从魔搭数据集中下载数据集'AI-ModelScope/alpaca-gpt4-data-zh'，赋值给参数dataset

# 转换为LangChain文档格式
def convert_to_documents(example):
    return Document(
        page_content=f"Instruction: {example['instruction']}\nOutput: {example['output']}",
        metadata={"input": example['input']}
    )

documents = [convert_to_documents(x) for x in dataset.select(range(1000))]  # 取前1000条

from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import LoraConfig, get_peft_model
import torch
# 加载模型和tokenizer
model_id = r"E:\soft\model\qwen\Qwen\Qwen2___5-0___5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True
)

# 添加LoRA适配器
peft_config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=["q_proj", "k_proj", "v_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM"
)
model = get_peft_model(model, peft_config)

# 准备训练数据
from langchain.text_splitter import RecursiveCharacterTextSplitter

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=512,
    chunk_overlap=50
)
texts = text_splitter.split_documents(documents)

# 数据集处理
train_data = tokenizer(
    [t.page_content for t in texts],
    max_length=512,
    truncation=True,
    padding="max_length",
    return_tensors="pt"
)

# 训练配置
from transformers import TrainingArguments, Trainer, AutoTokenizer

training_args = TrainingArguments(
    output_dir="./qwen2-0.5b-finetuned",
    per_device_train_batch_size=4,
    gradient_accumulation_steps=2,
    learning_rate=2e-5,
    num_train_epochs=3,
    logging_steps=10,
    save_strategy="epoch",
    fp16=True,
    remove_unused_columns=False
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_data
)

# 开始微调
trainer.train()
model.save_pretrained("./qwen2-0.5b-finetuned")
