Why does T5 only generate sentences of length 20? Can someone help me? I would like to generate longer sentences.

#17
by higashi1 - opened

from datasets import load_dataset

books = load_dataset('higashi1/mymulti30k', "en-de")
from transformers import AutoTokenizer

#checkpoint = "./logs/"
checkpoint = "t5-base"
tokenizer = AutoTokenizer.from_pretrained(checkpoint, use_fast=False)

source_lang = "en"
target_lang = "de"
prefix = "translate English to German: "

def preprocess_function(examples):
    # Add the task prefix to the English source; the German sentences are the targets
    inputs = [prefix + example for example in examples["en"]]
    targets = [example for example in examples["de"]]
    model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True)
    return model_inputs

tokenized_books = books.map(preprocess_function, batched=True)

from transformers import DataCollatorForSeq2Seq

data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint)

import evaluate

metric = evaluate.load("sacrebleu")

import numpy as np

def postprocess_text(preds, labels):
    preds = [pred.strip() for pred in preds]
    labels = [[label.strip()] for label in labels]
    return preds, labels

def compute_metrics(eval_preds):
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)

    # Replace the -100 padding in the labels with the pad token so they can be decoded
    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)

    result = metric.compute(predictions=decoded_preds, references=decoded_labels)
    result = {"bleu": result["score"]}

    prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
    result["gen_len"] = np.mean(prediction_lens)
    result = {k: round(v, 4) for k, v in result.items()}
    return result

from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer

model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

training_args = Seq2SeqTrainingArguments(
    output_dir="my__model",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    weight_decay=0.01,
    save_total_limit=3,
    num_train_epochs=1,
    predict_with_generate=True,
    fp16=True,
    push_to_hub=False,
    report_to="none",
    generation_max_length=30,
    # generation_max_length=128
)
trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_books["train"],
    eval_dataset=tokenized_books["test"],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)
# trainer.train()
# trainer.save_model("./logs/")

pred_result = trainer.predict(tokenized_books["test"])

I noticed that the longest generated sentence has length 20:
pred_result[0].shape
(461, 20)
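A likely explanation (worth double-checking on your transformers version): the library's default generation length is max_length=20, and t5-base does not override it, so generate() stops after 20 tokens unless the limit is raised via generation_max_length, max_length, or max_new_tokens. A quick check, assuming a version recent enough to expose generation_config:

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
# The library-wide default caps generation at 20 tokens; t5-base keeps it.
print(model.generation_config.max_length)  # expected: 20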


IndexError Traceback (most recent call last)
Cell In[17], line 1
----> 1 pred_result = trainer.predict(tokenized_books["test"])

File D:\anaconda\lib\site-packages\transformers\trainer_seq2seq.py:216, in Seq2SeqTrainer.predict(self, test_dataset, ignore_keys, metric_key_prefix, **gen_kwargs)
211 gen_kwargs["num_beams"] = (
212 gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
213 )
214 self._gen_kwargs = gen_kwargs
--> 216 return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)

File D:\anaconda\lib\site-packages\transformers\trainer.py:3129, in Trainer.predict(self, test_dataset, ignore_keys, metric_key_prefix)
3126 start_time = time.time()
3128 eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
-> 3129 output = eval_loop(
3130 test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix
3131 )
3132 total_batch_size = self.args.eval_batch_size * self.args.world_size
3133 if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:

File D:\anaconda\lib\site-packages\transformers\trainer.py:3353, in Trainer.evaluation_loop(self, dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix)
3349 metrics = self.compute_metrics(
3350 EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs)
3351 )
3352 else:
-> 3353 metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
3354 else:
3355 metrics = {}

Cell In[15], line 23, in compute_metrics(eval_preds)
21 if isinstance(preds, tuple):
22 preds = preds[0]
---> 23 decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
25 labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
26 decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

File D:\anaconda\lib\site-packages\transformers\tokenization_utils_base.py:3469, in PreTrainedTokenizerBase.batch_decode(self, sequences, skip_special_tokens, clean_up_tokenization_spaces, **kwargs)
3445 def batch_decode(
3446 self,
3447 sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
(...)
3450 **kwargs,
3451 ) -> List[str]:
3452 """
3453 Convert a list of lists of token ids into a list of strings by calling decode.
3454
(...)
3467 List[str]: The list of decoded sentences.
3468 """
-> 3469 return [
3470 self.decode(
3471 seq,
3472 skip_special_tokens=skip_special_tokens,
3473 clean_up_tokenization_spaces=clean_up_tokenization_spaces,
3474 **kwargs,
3475 )
3476 for seq in sequences
3477 ]

File D:\anaconda\lib\site-packages\transformers\tokenization_utils_base.py:3470, in <listcomp>(.0)
3445 def batch_decode(
3446 self,
3447 sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
(...)
3450 **kwargs,
3451 ) -> List[str]:
3452 """
3453 Convert a list of lists of token ids into a list of strings by calling decode.
3454
(...)
3467 List[str]: The list of decoded sentences.
3468 """
3469 return [
-> 3470 self.decode(
3471 seq,
3472 skip_special_tokens=skip_special_tokens,
3473 clean_up_tokenization_spaces=clean_up_tokenization_spaces,
3474 **kwargs,
3475 )
3476 for seq in sequences
3477 ]

File D:\anaconda\lib\site-packages\transformers\tokenization_utils_base.py:3509, in PreTrainedTokenizerBase.decode(self, token_ids, skip_special_tokens, clean_up_tokenization_spaces, **kwargs)
3506 # Convert inputs to python lists
3507 token_ids = to_py_obj(token_ids)
-> 3509 return self._decode(
3510 token_ids=token_ids,
3511 skip_special_tokens=skip_special_tokens,
3512 clean_up_tokenization_spaces=clean_up_tokenization_spaces,
3513 **kwargs,
3514 )

File D:\anaconda\lib\site-packages\transformers\tokenization_utils.py:931, in PreTrainedTokenizer._decode(self, token_ids, skip_special_tokens, clean_up_tokenization_spaces, spaces_between_special_tokens, **kwargs)
921 def _decode(
922 self,
923 token_ids: List[int],
(...)
927 **kwargs,
928 ) -> str:
929 self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)
--> 931 filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
933 # To avoid mixing byte-level and unicode for byte-level BPT
934 # we need to build string separately for added tokens and byte-level tokens
935 # cf. https://github.com/huggingface/transformers/issues/1133
936 sub_texts = []

File D:\anaconda\lib\site-packages\transformers\tokenization_utils.py:912, in PreTrainedTokenizer.convert_ids_to_tokens(self, ids, skip_special_tokens)
910 tokens.append(self.added_tokens_decoder[index])
911 else:
--> 912 tokens.append(self._convert_id_to_token(index))
913 return tokens

File D:\anaconda\lib\site-packages\transformers\models\t5\tokenization_t5.py:312, in T5Tokenizer._convert_id_to_token(self, index)
310 """Converts an index (integer) in a token (str) using the vocab."""
311 if index < self.sp_model.get_piece_size():
--> 312 token = self.sp_model.IdToPiece(index)
313 else:
314 token = f"<extra_id_{self.vocab_size - 1 - index}>"

File D:\anaconda\lib\site-packages\sentencepiece\__init__.py:1045, in _batchnize.<locals>._batched_func(self, arg)
1043 return [_func(self, n) for n in arg]
1044 else:
-> 1045 return _func(self, arg)

File D:\anaconda\lib\site-packages\sentencepiece\__init__.py:1038, in _batchnize.<locals>._func(v, n)
1036 def _func(v, n):
1037 if type(n) is int and (n < 0 or n >= v.piece_size()):
-> 1038 raise IndexError('piece id is out of range.')
1039 return func(v, n)

IndexError: piece id is out of range.

I set generation_max_length to 20, but I get the above error.
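If it helps: a common cause of this IndexError is that the Trainer pads predictions from different batches with -100 before calling compute_metrics, and -100 is not a valid SentencePiece piece id, so decoding fails. A minimal sketch of the fix (an assumption about this setup, reusing tokenizer, metric, and postprocess_text from the code above) is to map -100 to the pad token in the predictions, mirroring what is already done for the labels:

import numpy as np

def compute_metrics(eval_preds):
    preds, labels = eval_preds
    if isinstance(preds, tuple):
        preds = preds[0]
    # Predictions may be padded with -100 across batches; map that sentinel to
    # the pad token id before decoding, exactly as done for the labels below.
    preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)

    labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)

    decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
    result = metric.compute(predictions=decoded_preds, references=decoded_labels)
    return {"bleu": round(result["score"], 4)}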

Did you find a solution? I ran into the same problem.

I found a solution.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned tokenizer and model saved earlier with trainer.save_model("./logs/")
tokenizer = AutoTokenizer.from_pretrained("./logs/")
model = AutoModelForSeq2SeqLM.from_pretrained("./logs/")

# Prefix and tokenize the test sentences (padding so they batch into one tensor)
texts = [prefix + example for example in tokenized_books["test"]["en"]]
input_ids = tokenizer(texts, padding=True, return_tensors="pt").input_ids

# max_new_tokens overrides the default 20-token generation limit
outputs = model.generate(input_ids, max_new_tokens=100, do_sample=True, top_k=30, top_p=0.95)

Use model.generate with max_new_tokens instead of relying on the default generation length.
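The generated token ids can then be decoded back into text the same way compute_metrics does:

decoded = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(decoded[0])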
