I encountered an error when attempting to train phi

#104
by PhelixZhen - opened

I encountered an error when attempting to train phi. The error occurred at line 64 with the message:
"File "/home/phelixzhen/anaconda3/envs/LM/lib/python3.11/site-packages/torch/amp/autocast_mode.py", line 16, in decorate_autocast
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
TypeError: PhiModel.forward() got an unexpected keyword argument 'labels'"

It appears that the model does not require a 'labels' tag, but one is being passed to it. I have been trying to resolve this issue for a long time without success. It would mean a lot to me if someone could help me with this. Below is my code:

from transformers import AutoConfig, AutoModel, AutoTokenizer

model_name_or_path = "./"

model = AutoModel.from_pretrained(model_name_or_path)
config = AutoConfig.from_pretrained(model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path,model_max_length=2048)

import os
from datasets import load_dataset

directory_path = "/mnt/n/test"
ds = load_dataset(directory_path, split="train")

ds = ds.remove_columns(['meta'])
tokenizer.pad_token = tokenizer.eos_token

import torch
def encode_example(example):
   inputs = tokenizer.encode_plus(example['text'],truncation=True,padding="max_length",max_length=2048,return_tensors="pt", return_attention_mask=False)
   return inputs

dsm = ds.map(encode_example)

from transformers import TrainingArguments, Trainer

from transformers import DefaultDataCollator
from transformers import DataCollatorForLanguageModeling

data_collator = DataCollatorForLanguageModeling(
   tokenizer=tokenizer, mlm=False
)
print(dsm[1])

training_args = TrainingArguments(
   output_dir="/mnt/n/save",
   per_device_train_batch_size=2,
   gradient_accumulation_steps=2,
   learning_rate=2e-4,
   save_steps=300,
   fp16=True,
)
trainer = Trainer(
   model=model,
   args=training_args,
   train_dataset=dsm,
   data_collator=data_collator
)

trainer.train()

Sign up or log in to comment