# Load the tokenizer from the local Qwen3-1.7B checkpoint.
model_path = "/root/autodl-tmp/Qwen/Qwen3-1.7B"
# use_fast=False: use the slow (Python) tokenizer implementation;
# trust_remote_code=True: allow custom code shipped with the checkpoint.
tokenizer = AutoTokenizer.from_pretrained(
    model_path,
    use_fast=False,
    trust_remote_code=True,
)
def process_func(example):
    """Preprocess one dataset example into model-ready training fields.

    Builds a Qwen chat-template prompt from ``example['question']``, wraps
    the reasoning trace and answer as ``<think>...</think> answer``, and
    tokenizes both. The instruction part is masked out of ``labels`` with
    -100 so the loss is only computed on the assistant response.

    Args:
        example: A mapping with string keys 'question', 'think', 'answer'.

    Returns:
        dict with 'input_ids', 'attention_mask', 'labels' (lists of ints),
        each truncated to at most MAX_LENGTH tokens.
    """
    input_t = example['question']
    # NOTE: single quotes inside the f-string — nesting the same quote type
    # is a SyntaxError on Python < 3.12.
    output_t = f"<think>{example['think']}</think> \n {example['answer']}"
    instruction_text = (
        f"<|im_start|>system\n{PROMPT}<|im_end|>\n"
        f"<|im_start|>user\n{input_t}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

    # add_special_tokens=False: the chat-template markers above already
    # delimit the segments, so no BOS/EOS should be injected.
    instruction = tokenizer(instruction_text, add_special_tokens=False)
    response = tokenizer(output_t, add_special_tokens=False)

    # Assemble prompt + response + a trailing pad token (serves as EOS here
    # — presumably pad_token_id == eos_token_id for this checkpoint; verify).
    input_ids = instruction["input_ids"] + response["input_ids"] + [tokenizer.pad_token_id]
    attention_mask = instruction["attention_mask"] + response["attention_mask"] + [1]
    # -100 masks the instruction tokens from the loss; only the response
    # (and the trailing pad/eos token) is supervised.
    labels = [-100] * len(instruction["input_ids"]) + response["input_ids"] + [tokenizer.pad_token_id]

    if len(input_ids) > MAX_LENGTH:  # truncate over-long sequences
        input_ids = input_ids[:MAX_LENGTH]
        attention_mask = attention_mask[:MAX_LENGTH]
        labels = labels[:MAX_LENGTH]
    return {"input_ids": input_ids,
            "attention_mask": attention_mask,
            "labels": labels}
# Tokenize the whole training split; drop the raw text columns so only
# the model-ready fields produced by process_func remain.
train_dataset = train_ds.map(
    process_func,
    remove_columns=train_ds.column_names,
)
