# -*- coding: utf-8 -*-
# time: 2025/4/11 17:22
# file: ready_data.py
# author: hanson


from modelscope import AutoTokenizer, AutoModelForCausalLM
from peft import LoraConfig, get_peft_model
import torch

# Local filesystem path to the pretrained Qwen2.5-0.5B-Instruct checkpoint.
model_id = r"E:\soft\model\qwen\Qwen\Qwen2___5-0___5B-Instruct"
# Load the tokenizer that converts raw text into token ids the model understands.
# trust_remote_code permits tokenizer code shipped alongside the checkpoint to run.
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
# 3. 准备训练数据（关键修改部分）
def tokenize_function(examples):
    """Convert a batch of alpaca-style records into fixed-length token ids.

    Each instruction/input/output triple is rendered into one flat prompt
    string ("Instruction: ...\nInput: ...\nOutput: ..."), then the whole
    batch is tokenized with truncation and padding to exactly 512 tokens.

    Args:
        examples: batched dataset columns with "instruction", "input"
            and "output" lists of equal length.

    Returns:
        The tokenizer's batch encoding (input_ids, attention_mask, ...).
    """
    template = "Instruction: {}\nInput: {}\nOutput: {}"
    prompts = []
    for instruction, user_input, answer in zip(
        examples["instruction"],
        examples["input"],
        examples["output"],
    ):
        prompts.append(template.format(instruction, user_input, answer))
    # Fixed-length encoding keeps every row the same size for batching.
    return tokenizer(
        prompts,
        truncation=True,
        padding="max_length",
        max_length=512,
    )

# Load the training split from a local CSV export of the alpaca-gpt4-data-zh
# dataset, convert it to a Hugging Face Dataset, and keep only a handful of
# rows for a quick smoke test.
from modelscope.msdatasets import MsDataset

dataset = (
    MsDataset.load(r'F:\temp\alpaca-gpt4-data-zh\train.csv', split='train')
    .to_hf_dataset()
    .select(range(4))  # keep just 4 sample rows
)

# Tokenize every record in batches; dropping the original text columns leaves
# only the tokenizer outputs (input_ids, attention_mask, ...) in the result.
tokenized_dataset = dataset.map(
    tokenize_function,
    batched=True,
    remove_columns=dataset.column_names,
)

print(tokenized_dataset)
print(tokenized_dataset['input_ids'])
